diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml
index 60b9e31023..e9bfa6f8e4 100644
--- a/.azure-pipelines/azure-pipelines.yml
+++ b/.azure-pipelines/azure-pipelines.yml
@@ -29,22 +29,20 @@ schedules:
always: true
branches:
include:
- - stable-5
- - stable-4
+ - stable-11
+ - stable-10
- cron: 0 11 * * 0
displayName: Weekly (old stable branches)
always: true
branches:
include:
- - stable-3
+ - stable-9
variables:
- name: checkoutPath
value: ansible_collections/community/general
- name: coverageBranches
value: main
- - name: pipelinesCoverage
- value: coverage
- name: entryPoint
value: tests/utils/shippable/shippable.sh
- name: fetchDepth
@@ -53,7 +51,7 @@ variables:
resources:
containers:
- container: default
- image: quay.io/ansible/azure-pipelines-test-container:3.0.0
+ image: quay.io/ansible/azure-pipelines-test-container:7.0.0
pool: Standard
@@ -72,41 +70,40 @@ stages:
- test: 2
- test: 3
- test: 4
- - test: extra
- - stage: Sanity_2_13
- displayName: Sanity 2.13
+ - stage: Sanity_2_20
+ displayName: Sanity 2.20
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Test {0}
- testFormat: 2.13/sanity/{0}
+ testFormat: 2.20/sanity/{0}
targets:
- test: 1
- test: 2
- test: 3
- test: 4
- - stage: Sanity_2_12
- displayName: Sanity 2.12
+ - stage: Sanity_2_19
+ displayName: Sanity 2.19
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Test {0}
- testFormat: 2.12/sanity/{0}
+ testFormat: 2.19/sanity/{0}
targets:
- test: 1
- test: 2
- test: 3
- test: 4
- - stage: Sanity_2_11
- displayName: Sanity 2.11
+ - stage: Sanity_2_18
+ displayName: Sanity 2.18
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Test {0}
- testFormat: 2.11/sanity/{0}
+ testFormat: 2.18/sanity/{0}
targets:
- test: 1
- test: 2
@@ -122,54 +119,68 @@ stages:
nameFormat: Python {0}
testFormat: devel/units/{0}/1
targets:
- - test: 2.7
- - test: 3.5
- - test: 3.6
- - test: 3.7
- - test: 3.8
- test: 3.9
- test: '3.10'
- - stage: Units_2_13
- displayName: Units 2.13
+ - test: '3.11'
+ - test: '3.12'
+ - test: '3.13'
+ - test: '3.14'
+ - stage: Units_2_20
+ displayName: Units 2.20
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Python {0}
- testFormat: 2.13/units/{0}/1
+ testFormat: 2.20/units/{0}/1
targets:
- - test: 2.7
- - test: 3.6
- - test: 3.8
- test: 3.9
- - stage: Units_2_12
- displayName: Units 2.12
+ - test: "3.12"
+ - test: "3.14"
+ - stage: Units_2_19
+ displayName: Units 2.19
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Python {0}
- testFormat: 2.12/units/{0}/1
+ testFormat: 2.19/units/{0}/1
targets:
- - test: 2.6
- - test: 3.5
- test: 3.8
- - stage: Units_2_11
- displayName: Units 2.11
+ - test: "3.11"
+ - test: "3.13"
+ - stage: Units_2_18
+ displayName: Units 2.18
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Python {0}
- testFormat: 2.11/units/{0}/1
+ testFormat: 2.18/units/{0}/1
targets:
- - test: 2.6
- - test: 2.7
- - test: 3.5
- - test: 3.6
- - test: 3.9
+ - test: 3.8
+ - test: "3.11"
+ - test: "3.13"
## Remote
+ - stage: Remote_devel_extra_vms
+ displayName: Remote devel extra VMs
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/{0}
+ targets:
+ - name: Alpine 3.22
+ test: alpine/3.22
+ # - name: Fedora 42
+ # test: fedora/42
+ - name: Ubuntu 22.04
+ test: ubuntu/22.04
+ - name: Ubuntu 24.04
+ test: ubuntu/24.04
+ groups:
+ - vm
- stage: Remote_devel
displayName: Remote devel
dependsOn: []
@@ -178,68 +189,68 @@ stages:
parameters:
testFormat: devel/{0}
targets:
- - name: macOS 12.0
- test: macos/12.0
- - name: RHEL 7.9
- test: rhel/7.9
- - name: RHEL 9.0
- test: rhel/9.0
- - name: FreeBSD 12.3
- test: freebsd/12.3
- - name: FreeBSD 13.1
- test: freebsd/13.1
+ - name: macOS 15.3
+ test: macos/15.3
+ - name: RHEL 10.0
+ test: rhel/10.0
+ - name: RHEL 9.6
+ test: rhel/9.6
+ - name: FreeBSD 14.3
+ test: freebsd/14.3
+ - name: FreeBSD 13.5
+ test: freebsd/13.5
groups:
- 1
- 2
- 3
- - stage: Remote_2_13
- displayName: Remote 2.13
+ - stage: Remote_2_20
+ displayName: Remote 2.20
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
- testFormat: 2.13/{0}
+ testFormat: 2.20/{0}
targets:
- - name: macOS 12.0
- test: macos/12.0
- - name: RHEL 8.5
- test: rhel/8.5
+ - name: RHEL 10.0
+ test: rhel/10.0
+ - name: FreeBSD 14.3
+ test: freebsd/14.3
groups:
- 1
- 2
- 3
- - stage: Remote_2_12
- displayName: Remote 2.12
+ - stage: Remote_2_19
+ displayName: Remote 2.19
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
- testFormat: 2.12/{0}
+ testFormat: 2.19/{0}
targets:
- - name: macOS 11.1
- test: macos/11.1
- - name: RHEL 8.4
- test: rhel/8.4
- - name: FreeBSD 13.0
- test: freebsd/13.0
+ - name: RHEL 9.5
+ test: rhel/9.5
+ - name: RHEL 10.0
+ test: rhel/10.0
+ - name: FreeBSD 14.2
+ test: freebsd/14.2
groups:
- 1
- 2
- 3
- - stage: Remote_2_11
- displayName: Remote 2.11
+ - stage: Remote_2_18
+ displayName: Remote 2.18
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
- testFormat: 2.11/{0}
+ testFormat: 2.18/{0}
targets:
- - name: RHEL 7.9
- test: rhel/7.9
- - name: RHEL 8.3
- test: rhel/8.3
- #- name: FreeBSD 12.2
- # test: freebsd/12.2
+ - name: macOS 14.3
+ test: macos/14.3
+ - name: RHEL 9.4
+ test: rhel/9.4
+ - name: FreeBSD 14.1
+ test: freebsd/14.1
groups:
- 1
- 2
@@ -254,72 +265,64 @@ stages:
parameters:
testFormat: devel/linux/{0}
targets:
- - name: CentOS 7
- test: centos7
- - name: Fedora 36
- test: fedora36
- - name: openSUSE 15
- test: opensuse15
- - name: Ubuntu 20.04
- test: ubuntu2004
+ - name: Fedora 42
+ test: fedora42
+ - name: Alpine 3.22
+ test: alpine322
- name: Ubuntu 22.04
test: ubuntu2204
- - name: Alpine 3
- test: alpine3
+ - name: Ubuntu 24.04
+ test: ubuntu2404
groups:
- 1
- 2
- 3
- - stage: Docker_2_13
- displayName: Docker 2.13
+ - stage: Docker_2_20
+ displayName: Docker 2.20
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
- testFormat: 2.13/linux/{0}
+ testFormat: 2.20/linux/{0}
targets:
- - name: Fedora 35
- test: fedora35
- - name: openSUSE 15 py2
- test: opensuse15py2
- - name: Alpine 3
- test: alpine3
+ - name: Fedora 42
+ test: fedora42
+ - name: Alpine 3.22
+ test: alpine322
groups:
- 1
- 2
- 3
- - stage: Docker_2_12
- displayName: Docker 2.12
+ - stage: Docker_2_19
+ displayName: Docker 2.19
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
- testFormat: 2.12/linux/{0}
+ testFormat: 2.19/linux/{0}
targets:
- - name: CentOS 6
- test: centos6
- - name: Fedora 34
- test: fedora34
- - name: Ubuntu 18.04
- test: ubuntu1804
+ - name: Fedora 41
+ test: fedora41
+ - name: Alpine 3.21
+ test: alpine321
groups:
- 1
- 2
- 3
- - stage: Docker_2_11
- displayName: Docker 2.11
+ - stage: Docker_2_18
+ displayName: Docker 2.18
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
- testFormat: 2.11/linux/{0}
+ testFormat: 2.18/linux/{0}
targets:
- - name: Fedora 32
- test: fedora32
- - name: Fedora 33
- test: fedora33
- - name: Alpine 3
- test: alpine3
+ - name: Fedora 40
+ test: fedora40
+ - name: Alpine 3.20
+ test: alpine320
+ - name: Ubuntu 24.04
+ test: ubuntu2404
groups:
- 1
- 2
@@ -334,84 +337,92 @@ stages:
parameters:
testFormat: devel/linux-community/{0}
targets:
- - name: Debian Bullseye
+ - name: Debian 11 Bullseye
test: debian-bullseye/3.9
+ - name: Debian 12 Bookworm
+ test: debian-bookworm/3.11
+ - name: Debian 13 Trixie
+ test: debian-13-trixie/3.13
- name: ArchLinux
- test: archlinux/3.10
- - name: CentOS Stream 8
- test: centos-stream8/3.8
+ test: archlinux/3.13
groups:
- 1
- 2
- 3
-### Cloud
- - stage: Cloud_devel
- displayName: Cloud devel
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: devel/cloud/{0}/1
- targets:
- - test: 2.7
- - test: '3.10'
- - stage: Cloud_2_13
- displayName: Cloud 2.13
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.13/cloud/{0}/1
- targets:
- - test: 3.9
- - stage: Cloud_2_12
- displayName: Cloud 2.12
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.12/cloud/{0}/1
- targets:
- - test: 3.8
- - stage: Cloud_2_11
- displayName: Cloud 2.11
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.11/cloud/{0}/1
- targets:
- - test: 2.7
- - test: 3.5
+### Generic
+# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+# - stage: Generic_devel
+# displayName: Generic devel
+# dependsOn: []
+# jobs:
+# - template: templates/matrix.yml
+# parameters:
+# nameFormat: Python {0}
+# testFormat: devel/generic/{0}/1
+# targets:
+# - test: '3.9'
+# - test: '3.12'
+# - test: '3.14'
+# - stage: Generic_2_20
+# displayName: Generic 2.20
+# dependsOn: []
+# jobs:
+# - template: templates/matrix.yml
+# parameters:
+# nameFormat: Python {0}
+# testFormat: 2.20/generic/{0}/1
+# targets:
+# - test: '3.10'
+# - test: '3.14'
+# - stage: Generic_2_19
+# displayName: Generic 2.19
+# dependsOn: []
+# jobs:
+# - template: templates/matrix.yml
+# parameters:
+# nameFormat: Python {0}
+# testFormat: 2.19/generic/{0}/1
+# targets:
+# - test: '3.9'
+# - test: '3.13'
+# - stage: Generic_2_18
+# displayName: Generic 2.18
+# dependsOn: []
+# jobs:
+# - template: templates/matrix.yml
+# parameters:
+# nameFormat: Python {0}
+# testFormat: 2.18/generic/{0}/1
+# targets:
+# - test: '3.8'
+# - test: '3.13'
- stage: Summary
condition: succeededOrFailed()
dependsOn:
- Sanity_devel
- - Sanity_2_11
- - Sanity_2_12
- - Sanity_2_13
+ - Sanity_2_20
+ - Sanity_2_19
+ - Sanity_2_18
- Units_devel
- - Units_2_11
- - Units_2_12
- - Units_2_13
+ - Units_2_20
+ - Units_2_19
+ - Units_2_18
+ - Remote_devel_extra_vms
- Remote_devel
- - Remote_2_11
- - Remote_2_12
- - Remote_2_13
+ - Remote_2_20
+ - Remote_2_19
+ - Remote_2_18
- Docker_devel
- - Docker_2_11
- - Docker_2_12
- - Docker_2_13
+ - Docker_2_20
+ - Docker_2_19
+ - Docker_2_18
- Docker_community_devel
- - Cloud_devel
- - Cloud_2_11
- - Cloud_2_12
- - Cloud_2_13
+# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+# - Generic_devel
+# - Generic_2_20
+# - Generic_2_19
+# - Generic_2_18
jobs:
- template: templates/coverage.yml
diff --git a/.azure-pipelines/templates/coverage.yml b/.azure-pipelines/templates/coverage.yml
index 3c8841aa26..1bf17e053a 100644
--- a/.azure-pipelines/templates/coverage.yml
+++ b/.azure-pipelines/templates/coverage.yml
@@ -28,16 +28,6 @@ jobs:
- bash: .azure-pipelines/scripts/report-coverage.sh
displayName: Generate Coverage Report
condition: gt(variables.coverageFileCount, 0)
- - task: PublishCodeCoverageResults@1
- inputs:
- codeCoverageTool: Cobertura
- # Azure Pipelines only accepts a single coverage data file.
- # That means only Python or PowerShell coverage can be uploaded, but not both.
- # Set the "pipelinesCoverage" variable to determine which type is uploaded.
- # Use "coverage" for Python and "coverage-powershell" for PowerShell.
- summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
- displayName: Publish to Azure Pipelines
- condition: gt(variables.coverageFileCount, 0)
- bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
displayName: Publish to codecov.io
condition: gt(variables.coverageFileCount, 0)
diff --git a/.azure-pipelines/templates/matrix.yml b/.azure-pipelines/templates/matrix.yml
index 4876375855..49f5d8595a 100644
--- a/.azure-pipelines/templates/matrix.yml
+++ b/.azure-pipelines/templates/matrix.yml
@@ -50,11 +50,11 @@ jobs:
parameters:
jobs:
- ${{ if eq(length(parameters.groups), 0) }}:
- - ${{ each target in parameters.targets }}:
- - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
- test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
- - ${{ if not(eq(length(parameters.groups), 0)) }}:
- - ${{ each group in parameters.groups }}:
- ${{ each target in parameters.targets }}:
- - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
- test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
+ - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
+ test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
+ - ${{ if not(eq(length(parameters.groups), 0)) }}:
+ - ${{ each group in parameters.groups }}:
+ - ${{ each target in parameters.targets }}:
+ - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
+ test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
diff --git a/.azure-pipelines/templates/test.yml b/.azure-pipelines/templates/test.yml
index 700cf629d7..b263379c06 100644
--- a/.azure-pipelines/templates/test.yml
+++ b/.azure-pipelines/templates/test.yml
@@ -14,37 +14,37 @@ parameters:
jobs:
- ${{ each job in parameters.jobs }}:
- - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
- displayName: ${{ job.name }}
- container: default
- workspace:
- clean: all
- steps:
- - checkout: self
- fetchDepth: $(fetchDepth)
- path: $(checkoutPath)
- - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
- displayName: Run Tests
- - bash: .azure-pipelines/scripts/process-results.sh
- condition: succeededOrFailed()
- displayName: Process Results
- - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
- condition: eq(variables.haveCoverageData, 'true')
- displayName: Aggregate Coverage Data
- - task: PublishTestResults@2
- condition: eq(variables.haveTestResults, 'true')
- inputs:
- testResultsFiles: "$(outputPath)/junit/*.xml"
- displayName: Publish Test Results
- - task: PublishPipelineArtifact@1
- condition: eq(variables.haveBotResults, 'true')
- displayName: Publish Bot Results
- inputs:
- targetPath: "$(outputPath)/bot/"
- artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
- - task: PublishPipelineArtifact@1
- condition: eq(variables.haveCoverageData, 'true')
- displayName: Publish Coverage Data
- inputs:
- targetPath: "$(Agent.TempDirectory)/coverage/"
- artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
+ - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
+ displayName: ${{ job.name }}
+ container: default
+ workspace:
+ clean: all
+ steps:
+ - checkout: self
+ fetchDepth: $(fetchDepth)
+ path: $(checkoutPath)
+ - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
+ displayName: Run Tests
+ - bash: .azure-pipelines/scripts/process-results.sh
+ condition: succeededOrFailed()
+ displayName: Process Results
+ - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Aggregate Coverage Data
+ - task: PublishTestResults@2
+ condition: eq(variables.haveTestResults, 'true')
+ inputs:
+ testResultsFiles: "$(outputPath)/junit/*.xml"
+ displayName: Publish Test Results
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveBotResults, 'true')
+ displayName: Publish Bot Results
+ inputs:
+ targetPath: "$(outputPath)/bot/"
+ artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Publish Coverage Data
+ inputs:
+ targetPath: "$(Agent.TempDirectory)/coverage/"
+ artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000000..cd4bdfee65
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# YAML reformatting
+d032de3b16eed11ea3a31cd3d96d78f7c46a2ee0
+e8f965fbf8154ea177c6622da149f2ae8533bd3c
+e938ca5f20651abc160ee6aba10014013d04dcc1
+eaa5e07b2866e05b6c7b5628ca92e9cb1142d008
diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml
index 5ae2935209..d9d291f3b1 100644
--- a/.github/BOTMETA.yml
+++ b/.github/BOTMETA.yml
@@ -13,9 +13,9 @@ files:
support: community
$actions:
labels: action
- $actions/system/iptables_state.py:
+ $actions/iptables_state.py:
maintainers: quidame
- $actions/system/shutdown.py:
+ $actions/shutdown.py:
maintainers: nitzmahone samdoran aminvakil
$becomes/:
labels: become
@@ -33,6 +33,8 @@ files:
maintainers: $team_ansible_core
$becomes/pmrun.py:
maintainers: $team_ansible_core
+ $becomes/run0.py:
+ maintainers: konstruktoid
$becomes/sesu.py:
maintainers: nekonyuu
$becomes/sudosu.py:
@@ -50,20 +52,21 @@ files:
$callbacks/cgroup_memory_recap.py: {}
$callbacks/context_demo.py: {}
$callbacks/counter_enabled.py: {}
+ $callbacks/default_without_diff.py:
+ maintainers: felixfontein
$callbacks/dense.py:
maintainers: dagwieers
$callbacks/diy.py:
maintainers: theque5t
$callbacks/elastic.py:
- maintainers: v1v
keywords: apm observability
- $callbacks/hipchat.py: {}
+ maintainers: v1v
$callbacks/jabber.py: {}
+ $callbacks/log_plays.py: {}
$callbacks/loganalytics.py:
maintainers: zhcli
$callbacks/logdna.py: {}
$callbacks/logentries.py: {}
- $callbacks/log_plays.py: {}
$callbacks/logstash.py:
maintainers: ujenmr
$callbacks/mail.py:
@@ -72,57 +75,73 @@ files:
maintainers: rverchere
$callbacks/null.py: {}
$callbacks/opentelemetry.py:
- maintainers: v1v
keywords: opentelemetry observability
+ maintainers: v1v
+ $callbacks/print_task.py:
+ maintainers: demonpig
$callbacks/say.py:
- notify: chris-short
- maintainers: $team_macos
- labels: macos say
keywords: brew cask darwin homebrew macosx macports osx
+ labels: macos say
+ maintainers: $team_macos
+ notify: chris-short
$callbacks/selective.py: {}
$callbacks/slack.py: {}
$callbacks/splunk.py: {}
$callbacks/sumologic.py:
- maintainers: ryancurrah
labels: sumologic
+ maintainers: ryancurrah
$callbacks/syslog_json.py:
maintainers: imjoseangel
+ $callbacks/tasks_only.py:
+ maintainers: felixfontein
+ $callbacks/timestamp.py:
+ maintainers: kurokobo
$callbacks/unixy.py:
- maintainers: akatch
labels: unixy
- $callbacks/yaml.py: {}
+ maintainers: akatch
$connections/:
labels: connections
$connections/chroot.py: {}
$connections/funcd.py:
maintainers: mscherer
$connections/iocage.py: {}
+ $connections/incus.py:
+ labels: incus
+ maintainers: stgraber
$connections/jail.py:
maintainers: $team_ansible_core
$connections/lxc.py: {}
$connections/lxd.py:
- maintainers: mattclay
labels: lxd
+ maintainers: mattclay
$connections/qubes.py:
maintainers: kushaldas
$connections/saltstack.py:
- maintainers: mscherer
labels: saltstack
+ maintainers: mscherer
+ $connections/wsl.py:
+ maintainers: rgl
$connections/zone.py:
maintainers: $team_ansible_core
$doc_fragments/:
labels: docs_fragments
+ $doc_fragments/django.py:
+ maintainers: russoz
$doc_fragments/hpe3par.py:
- maintainers: farhan7500 gautamphegde
labels: hpe3par
+ maintainers: farhan7500 gautamphegde
$doc_fragments/hwc.py:
- maintainers: $team_huawei
labels: hwc
+ maintainers: $team_huawei
$doc_fragments/nomad.py:
- maintainers: chris93111
+ maintainers: chris93111 apecnascimento
+ $doc_fragments/pipx.py:
+ maintainers: russoz
$doc_fragments/xenserver.py:
- maintainers: bvitnik
labels: xenserver
+ maintainers: bvitnik
+ $filters/accumulate.py:
+ maintainers: VannTen
$filters/counter.py:
maintainers: keilr
$filters/crc32.py:
@@ -133,6 +152,8 @@ files:
maintainers: giner
$filters/from_csv.py:
maintainers: Ajpantuso
+ $filters/from_ini.py:
+ maintainers: sscheib
$filters/groupby_as_dict.py:
maintainers: felixfontein
$filters/hashids.py:
@@ -143,32 +164,68 @@ files:
maintainers: Ajpantuso
$filters/jc.py:
maintainers: kellyjonbrazil
+ $filters/json_diff.yml:
+ maintainers: numo68
+ $filters/json_patch.py:
+ maintainers: numo68
+ $filters/json_patch.yml:
+ maintainers: numo68
+ $filters/json_patch_recipe.yml:
+ maintainers: numo68
$filters/json_query.py: {}
+ $filters/keep_keys.py:
+ maintainers: vbotka
+ $filters/lists.py:
+ maintainers: cfiehe
+ $filters/lists_difference.yml:
+ maintainers: cfiehe
+ $filters/lists_intersect.yml:
+ maintainers: cfiehe
$filters/lists_mergeby.py:
maintainers: vbotka
+ $filters/lists_symmetric_difference.yml:
+ maintainers: cfiehe
+ $filters/lists_union.yml:
+ maintainers: cfiehe
$filters/random_mac.py: {}
+ $filters/remove_keys.py:
+ maintainers: vbotka
+ $filters/replace_keys.py:
+ maintainers: vbotka
+ $filters/reveal_ansible_type.py:
+ maintainers: vbotka
$filters/time.py:
maintainers: resmo
- $filters/unicode_normalize.py:
- maintainers: Ajpantuso
$filters/to_days.yml:
maintainers: resmo
$filters/to_hours.yml:
maintainers: resmo
+ $filters/to_ini.py:
+ maintainers: sscheib
$filters/to_milliseconds.yml:
maintainers: resmo
$filters/to_minutes.yml:
maintainers: resmo
$filters/to_months.yml:
maintainers: resmo
+ $filters/to_nice_yaml.yml:
+ maintainers: felixfontein
+ $filters/to_prettytable.py:
+ maintainers: tgadiev
$filters/to_seconds.yml:
maintainers: resmo
$filters/to_time_unit.yml:
maintainers: resmo
$filters/to_weeks.yml:
maintainers: resmo
+ $filters/to_yaml.py:
+ maintainers: felixfontein
+ $filters/to_yaml.yml:
+ maintainers: felixfontein
$filters/to_years.yml:
maintainers: resmo
+ $filters/unicode_normalize.py:
+ maintainers: Ajpantuso
$filters/version_sort.py:
maintainers: ericzolf
$inventories/:
@@ -177,33 +234,35 @@ files:
maintainers: opoplawski
$inventories/gitlab_runners.py:
maintainers: morph027
+  $inventories/icinga2.py:
+    maintainers: BongoEADGC6
+  $inventories/iocage.py:
+    maintainers: vbotka
$inventories/linode.py:
- maintainers: $team_linode
- labels: cloud linode
keywords: linode dynamic inventory script
+ labels: cloud linode
+ maintainers: $team_linode
$inventories/lxd.py:
maintainers: conloos
$inventories/nmap.py: {}
$inventories/online.py:
maintainers: remyleone
$inventories/opennebula.py:
- maintainers: feldsam
- labels: cloud opennebula
keywords: opennebula dynamic inventory script
- $inventories/proxmox.py:
- maintainers: $team_virt ilijamt
+ labels: cloud opennebula
+ maintainers: feldsam
+ $inventories/scaleway.py:
+ labels: cloud scaleway
+ maintainers: $team_scaleway
+ $inventories/virtualbox.py: {}
$inventories/xen_orchestra.py:
maintainers: ddelnano shinuza
- $inventories/icinga2.py:
- maintainers: BongoEADGC6
- $inventories/scaleway.py:
- maintainers: $team_scaleway
- labels: cloud scaleway
- $inventories/stackpath_compute.py:
- maintainers: shayrybak
- $inventories/virtualbox.py: {}
$lookups/:
labels: lookups
+ $lookups/binary_file.py:
+ maintainers: felixfontein
+ $lookups/bitwarden_secrets_manager.py:
+ maintainers: jantari
$lookups/bitwarden.py:
maintainers: lungj
$lookups/cartesian.py: {}
@@ -213,40 +272,46 @@ files:
$lookups/consul_kv.py: {}
$lookups/credstash.py: {}
$lookups/cyberarkpassword.py:
- notify: cyberark-bizdev
labels: cyberarkpassword
+ notify: cyberark-bizdev
$lookups/dependent.py:
maintainers: felixfontein
$lookups/dig.py:
- maintainers: jpmens
labels: dig
+ maintainers: jpmens
$lookups/dnstxt.py:
maintainers: jpmens
$lookups/dsv.py:
- maintainers: amigus endlesstrax delineaKrehl tylerezimmerman
- $lookups/etcd3.py:
- maintainers: eric-belhomme
+ ignore: amigus
+ maintainers: delineaKrehl tylerezimmerman
$lookups/etcd.py:
maintainers: jpmens
+ $lookups/etcd3.py:
+ maintainers: eric-belhomme
$lookups/filetree.py:
maintainers: dagwieers
$lookups/flattened.py: {}
+ $lookups/github_app_access_token.py:
+ maintainers: weisheng-p blavoie
$lookups/hiera.py:
maintainers: jparrill
$lookups/keyring.py: {}
$lookups/lastpass.py: {}
$lookups/lmdb_kv.py:
maintainers: jpmens
- $lookups/manifold.py:
- maintainers: galanoff
- labels: manifold
+ $lookups/merge_variables.py:
+ maintainers: rlenferink m-a-r-k-e alpex8
$lookups/onepass:
- maintainers: samdoran
labels: onepassword
+ maintainers: samdoran
$lookups/onepassword.py:
- maintainers: azenk scottsb
+ ignore: scottsb
+ maintainers: azenk
$lookups/onepassword_raw.py:
- maintainers: azenk scottsb
+ ignore: scottsb
+ maintainers: azenk
+ $lookups/onepassword_ssh_key.py:
+ maintainers: mohammedbabelly20
$lookups/passwordstore.py: {}
$lookups/random_pet.py:
maintainers: Akasurde
@@ -260,1028 +325,1254 @@ files:
maintainers: RevBits
$lookups/shelvefile.py: {}
$lookups/tss.py:
- maintainers: amigus endlesstrax delineaKrehl tylerezimmerman
+ ignore: amigus
+ maintainers: delineaKrehl tylerezimmerman
$module_utils/:
labels: module_utils
- $module_utils/gconftool2.py:
+ $module_utils/android_sdkmanager.py:
+ maintainers: shamilovstas
+ $module_utils/btrfs.py:
+ maintainers: gnfzdz
+ $module_utils/cmd_runner_fmt.py:
maintainers: russoz
+ $module_utils/cmd_runner.py:
+ maintainers: russoz
+ $module_utils/deps.py:
+ maintainers: russoz
+ $module_utils/django.py:
+ maintainers: russoz
+ $module_utils/gconftool2.py:
labels: gconftool2
+ maintainers: russoz
+ $module_utils/gio_mime.py:
+ maintainers: russoz
$module_utils/gitlab.py:
- notify: jlozadad
- maintainers: $team_gitlab
- labels: gitlab
keywords: gitlab source_control
+ labels: gitlab
+ maintainers: $team_gitlab
+ notify: jlozadad
$module_utils/hwc_utils.py:
- maintainers: $team_huawei
- labels: huawei hwc_utils networking
keywords: cloud huawei hwc
+ labels: huawei hwc_utils networking
+ maintainers: $team_huawei
$module_utils/identity/keycloak/keycloak.py:
maintainers: $team_keycloak
+ $module_utils/identity/keycloak/keycloak_clientsecret.py:
+ maintainers: $team_keycloak fynncfchen johncant
$module_utils/ipa.py:
- maintainers: $team_ipa
labels: ipa
+ maintainers: $team_ipa
+ $module_utils/jenkins.py:
+ labels: jenkins
+ maintainers: russoz
$module_utils/manageiq.py:
- maintainers: $team_manageiq
labels: manageiq
+ maintainers: $team_manageiq
$module_utils/memset.py:
- maintainers: glitchcrab
labels: cloud memset
$module_utils/mh/:
- maintainers: russoz
labels: module_helper
+ maintainers: russoz
$module_utils/module_helper.py:
- maintainers: russoz
labels: module_helper
+ maintainers: russoz
$module_utils/net_tools/pritunl/:
maintainers: Lowess
$module_utils/oracle/oci_utils.py:
- maintainers: $team_oracle
labels: cloud
+ maintainers: $team_oracle
+ $module_utils/pacemaker.py:
+ maintainers: munchtoast
$module_utils/pipx.py:
- maintainers: russoz
labels: pipx
- $module_utils/pure.py:
- maintainers: $team_purestorage
- labels: pure pure_storage
+ maintainers: russoz
+ $module_utils/pkg_req.py:
+ maintainers: russoz
+ $module_utils/python_runner.py:
+ maintainers: russoz
+ $module_utils/puppet.py:
+ labels: puppet
+ maintainers: russoz
$module_utils/redfish_utils.py:
- maintainers: $team_redfish
labels: redfish_utils
+ maintainers: $team_redfish
$module_utils/remote_management/lxca/common.py:
maintainers: navalkp prabhosa
$module_utils/scaleway.py:
- maintainers: $team_scaleway
labels: cloud scaleway
+ maintainers: $team_scaleway
+ $module_utils/snap.py:
+ labels: snap
+ maintainers: russoz
+ $module_utils/ssh.py:
+ maintainers: russoz
+ $module_utils/systemd.py:
+ maintainers: NomakCooper
$module_utils/storage/hpe3par/hpe3par.py:
maintainers: farhan7500 gautamphegde
$module_utils/utm_utils.py:
- maintainers: $team_e_spirit
labels: utm_utils
+ maintainers: $team_e_spirit
+ $module_utils/vardict.py:
+ labels: vardict
+ maintainers: russoz
$module_utils/wdc_redfish_utils.py:
- maintainers: $team_wdc
labels: wdc_redfish_utils
+ maintainers: $team_wdc
+ $module_utils/xdg_mime.py:
+ maintainers: mhalano
$module_utils/xenserver.py:
- maintainers: bvitnik
labels: xenserver
- $module_utils/xfconf.py:
- maintainers: russoz
- labels: xfconf
- $modules/cloud/alicloud/:
- maintainers: xiaozhu36
- $modules/cloud/atomic/atomic_container.py:
- maintainers: giuseppe krsacme
- $modules/cloud/atomic/:
- maintainers: krsacme
- $modules/cloud/centurylink/:
- maintainers: clc-runner
- $modules/cloud/dimensiondata/dimensiondata_network.py:
- maintainers: aimonb tintoy
- labels: dimensiondata_network
- $modules/cloud/dimensiondata/dimensiondata_vlan.py:
- maintainers: tintoy
- $modules/cloud/heroku/heroku_collaborator.py:
- maintainers: marns93
- $modules/cloud/huawei/:
- maintainers: $team_huawei huaweicloud
- keywords: cloud huawei hwc
- $modules/cloud/linode/:
- maintainers: $team_linode
- $modules/cloud/linode/linode.py:
- maintainers: zbal
- $modules/cloud/lxc/lxc_container.py:
- maintainers: cloudnull
- $modules/cloud/lxd/:
- ignore: hnakamur
- $modules/cloud/lxd/lxd_profile.py:
- maintainers: conloos
- $modules/cloud/lxd/lxd_project.py:
- maintainers: we10710aa
- $modules/cloud/memset/:
- maintainers: glitchcrab
- $modules/cloud/misc/cloud_init_data_facts.py:
- maintainers: resmo
- $modules/cloud/misc/proxmox:
- maintainers: $team_virt
- labels: proxmox virt
- keywords: kvm libvirt proxmox qemu
- $modules/cloud/misc/proxmox.py:
- maintainers: UnderGreen
- ignore: skvidal
- $modules/cloud/misc/proxmox_kvm.py:
- maintainers: helldorado
- ignore: skvidal
- $modules/cloud/misc/proxmox_nic.py:
- maintainers: Kogelvis
- $modules/cloud/misc/proxmox_tasks_info:
- maintainers: paginabianca
- $modules/cloud/misc/proxmox_template.py:
- maintainers: UnderGreen
- ignore: skvidal
- $modules/cloud/misc/rhevm.py:
- maintainers: $team_virt TimothyVandenbrande
- labels: rhevm virt
- ignore: skvidal
- keywords: kvm libvirt proxmox qemu
- $modules/cloud/misc/:
- ignore: ryansb
- $modules/cloud/misc/terraform.py:
- maintainers: m-yosefpor rainerleber
- $modules/cloud/misc/xenserver_facts.py:
- maintainers: caphrim007 cheese
- labels: xenserver_facts
- ignore: andyhky
- $modules/cloud/oneandone/:
- maintainers: aajdinov edevenport
- $modules/cloud/online/:
- maintainers: remyleone
- $modules/cloud/opennebula/:
- maintainers: $team_opennebula
- $modules/cloud/opennebula/one_host.py:
- maintainers: rvalle
- $modules/cloud/oracle/oci_vcn.py:
- maintainers: $team_oracle rohitChaware
- $modules/cloud/ovh/:
- maintainers: pascalheraud
- $modules/cloud/ovh/ovh_monthly_billing.py:
- maintainers: fraff
- $modules/cloud/packet/packet_device.py:
- maintainers: baldwinSPC t0mk teebes
- $modules/cloud/packet/:
- maintainers: nurfet-becirevic t0mk
- $modules/cloud/packet/packet_sshkey.py:
- maintainers: t0mk
- $modules/cloud/profitbricks/:
- maintainers: baldwinSPC
- $modules/cloud/pubnub/pubnub_blocks.py:
- maintainers: parfeon pubnub
- $modules/cloud/rackspace/rax.py:
- maintainers: omgjlk sivel
- $modules/cloud/rackspace/:
- ignore: ryansb sivel
- $modules/cloud/rackspace/rax_cbs.py:
- maintainers: claco
- $modules/cloud/rackspace/rax_cbs_attachments.py:
- maintainers: claco
- $modules/cloud/rackspace/rax_cdb.py:
- maintainers: jails
- $modules/cloud/rackspace/rax_cdb_user.py:
- maintainers: jails
- $modules/cloud/rackspace/rax_cdb_database.py:
- maintainers: jails
- $modules/cloud/rackspace/rax_clb.py:
- maintainers: claco
- $modules/cloud/rackspace/rax_clb_nodes.py:
- maintainers: neuroid
- $modules/cloud/rackspace/rax_clb_ssl.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_files.py:
- maintainers: angstwad
- $modules/cloud/rackspace/rax_files_objects.py:
- maintainers: angstwad
- $modules/cloud/rackspace/rax_identity.py:
- maintainers: claco
- $modules/cloud/rackspace/rax_network.py:
- maintainers: claco omgjlk
- $modules/cloud/rackspace/rax_mon_alarm.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_mon_check.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_mon_entity.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_mon_notification.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_mon_notification_plan.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_queue.py:
- maintainers: claco
- $modules/cloud/scaleway/:
- maintainers: $team_scaleway
- $modules/cloud/scaleway/scaleway_compute_private_network.py:
- maintainers: pastral
- $modules/cloud/scaleway/scaleway_database_backup.py:
- maintainers: guillaume_ro_fr
- $modules/cloud/scaleway/scaleway_image_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_ip_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_organization_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_private_network.py:
- maintainers: pastral
- $modules/cloud/scaleway/scaleway_security_group.py:
- maintainers: DenBeke
- $modules/cloud/scaleway/scaleway_security_group_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_security_group_rule.py:
- maintainers: DenBeke
- $modules/cloud/scaleway/scaleway_server_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_snapshot_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_volume.py:
- labels: scaleway_volume
- ignore: hekonsek
- $modules/cloud/scaleway/scaleway_volume_info.py:
- maintainers: Spredzy
- $modules/cloud/smartos/:
- maintainers: $team_solaris
- labels: solaris
- keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
- $modules/cloud/smartos/nictagadm.py:
- maintainers: SmithX10
- $modules/cloud/softlayer/sl_vm.py:
- maintainers: mcltn
- $modules/cloud/spotinst/spotinst_aws_elastigroup.py:
- maintainers: talzur
- $modules/cloud/univention/:
- maintainers: keachi
- $modules/cloud/webfaction/:
- maintainers: quentinsf
- $modules/cloud/xenserver/:
maintainers: bvitnik
- $modules/clustering/consul/:
- maintainers: $team_consul
- ignore: colin-nolan
- $modules/clustering/etcd3.py:
- maintainers: evrardjp
- ignore: vfauth
- $modules/clustering/nomad/:
- maintainers: chris93111
- $modules/clustering/pacemaker_cluster.py:
- maintainers: matbu
- $modules/clustering/znode.py:
- maintainers: treyperry
- $modules/database/aerospike/aerospike_migrations.py:
+ $module_utils/xfconf.py:
+ labels: xfconf
+ maintainers: russoz
+ $modules/aerospike_migrations.py:
maintainers: Alb0t
- $modules/database/influxdb/:
- maintainers: kamsz
- $modules/database/influxdb/influxdb_query.py:
- maintainers: resmo
- $modules/database/influxdb/influxdb_user.py:
- maintainers: zhhuta
- $modules/database/influxdb/influxdb_write.py:
- maintainers: resmo
- $modules/database/misc/elasticsearch_plugin.py:
- maintainers: ThePixelDeveloper samdoran
- $modules/database/misc/kibana_plugin.py:
- maintainers: barryib
- $modules/database/misc/odbc.py:
- maintainers: john-westcott-iv
- $modules/database/misc/redis.py:
- maintainers: slok
- $modules/database/misc/redis_info.py:
- maintainers: levonet
- $modules/database/misc/redis_data_info.py:
- maintainers: paginabianca
- $modules/database/misc/redis_data.py:
- maintainers: paginabianca
- $modules/database/misc/redis_data_incr.py:
- maintainers: paginabianca
- $modules/database/misc/riak.py:
- maintainers: drewkerrigan jsmartin
- $modules/database/mssql/mssql_db.py:
- maintainers: vedit Jmainguy kenichi-ogawa-1988
- labels: mssql_db
- $modules/database/mssql/mssql_script.py:
- maintainers: kbudde
- labels: mssql_script
- $modules/database/saphana/hana_query.py:
- maintainers: rainerleber
- $modules/database/vertica/:
- maintainers: dareko
- $modules/files/archive.py:
- maintainers: bendoh
- $modules/files/filesize.py:
- maintainers: quidame
- $modules/files/ini_file.py:
- maintainers: jpmens noseka1
- $modules/files/iso_create.py:
- maintainers: Tomorrow9
- $modules/files/iso_extract.py:
- maintainers: dagwieers jhoekx ribbons
- $modules/files/read_csv.py:
- maintainers: dagwieers
- $modules/files/sapcar_extract.py:
- maintainers: RainerLeber
- $modules/files/xattr.py:
- maintainers: bcoca
- labels: xattr
- $modules/files/xml.py:
- maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0
- labels: m:xml xml
- ignore: magnus919
- $modules/identity/ipa/:
- maintainers: $team_ipa
- $modules/identity/ipa/ipa_pwpolicy.py:
- maintainers: adralioh
- $modules/identity/ipa/ipa_service.py:
- maintainers: cprh
- $modules/identity/ipa/ipa_vault.py:
- maintainers: jparrill
- $modules/identity/keycloak/:
- maintainers: $team_keycloak
- $modules/identity/keycloak/keycloak_authentication.py:
- maintainers: elfelip Gaetan2907
- $modules/identity/keycloak/keycloak_clientscope.py:
- maintainers: Gaetan2907
- $modules/identity/keycloak/keycloak_client_rolemapping.py:
- maintainers: Gaetan2907
- $modules/identity/keycloak/keycloak_group.py:
- maintainers: adamgoossens
- $modules/identity/keycloak/keycloak_identity_provider.py:
- maintainers: laurpaum
- $modules/identity/keycloak/keycloak_realm_info.py:
- maintainers: fynncfchen
- $modules/identity/keycloak/keycloak_realm.py:
- maintainers: kris2kris
- $modules/identity/keycloak/keycloak_role.py:
- maintainers: laurpaum
- $modules/identity/keycloak/keycloak_user_federation.py:
- maintainers: laurpaum
- $modules/identity/onepassword_info.py:
- maintainers: Rylon
- $modules/identity/opendj/opendj_backendprop.py:
- maintainers: dj-wasabi
- $modules/monitoring/airbrake_deployment.py:
- maintainers: phumpal
+ $modules/airbrake_deployment.py:
+ ignore: bpennypacker
labels: airbrake_deployment
- ignore: bpennypacker
- $modules/monitoring/alerta_customer.py:
- maintainers: cwollinger
- $modules/monitoring/bigpanda.py:
- maintainers: hkariti
- $modules/monitoring/circonus_annotation.py:
- maintainers: NickatEpic
- $modules/monitoring/datadog/datadog_event.py:
- maintainers: n0ts
- labels: datadog_event
- ignore: arturaz
- $modules/monitoring/datadog/datadog_downtime.py:
- maintainers: Datadog
- $modules/monitoring/datadog/datadog_monitor.py:
- maintainers: skornehl
- $modules/monitoring/honeybadger_deployment.py:
- maintainers: stympy
- $modules/monitoring/icinga2_feature.py:
- maintainers: nerzhul
- $modules/monitoring/icinga2_host.py:
- maintainers: t794104
- $modules/monitoring/librato_annotation.py:
- maintainers: Sedward
- $modules/monitoring/logentries.py:
- labels: logentries
- ignore: ivanvanderbyl
- $modules/monitoring/logstash_plugin.py:
- maintainers: nerzhul
- $modules/monitoring/monit.py:
- maintainers: dstoflet brian-brazil snopoke
- labels: monit
- $modules/monitoring/nagios.py:
- maintainers: tbielawa tgoetheyn
- $modules/monitoring/newrelic_deployment.py:
- ignore: mcodd
- $modules/monitoring/pagerduty.py:
- maintainers: suprememoocow thaumos
- labels: pagerduty
- ignore: bpennypacker
- $modules/monitoring/pagerduty_alert.py:
- maintainers: ApsOps
- $modules/monitoring/pagerduty_change.py:
- maintainers: adamvaughan
- $modules/monitoring/pagerduty_user.py:
- maintainers: zanssa
- $modules/monitoring/pingdom.py:
- maintainers: thaumos
- $modules/monitoring/rollbar_deployment.py:
- maintainers: kavu
- $modules/monitoring/sensu/sensu_check.py:
- maintainers: andsens
- $modules/monitoring/sensu/:
- maintainers: dmsimard
- $modules/monitoring/sensu/sensu_silence.py:
- maintainers: smbambling
- $modules/monitoring/sensu/sensu_subscription.py:
- maintainers: andsens
- $modules/monitoring/spectrum_device.py:
- maintainers: orgito
- $modules/monitoring/spectrum_model_attrs.py:
- maintainers: tgates81
- $modules/monitoring/stackdriver.py:
- maintainers: bwhaley
- $modules/monitoring/statsd.py:
- maintainers: mamercad
- $modules/monitoring/statusio_maintenance.py:
- maintainers: bhcopeland
- $modules/monitoring/uptimerobot.py:
- maintainers: nate-kingsley
- $modules/net_tools/cloudflare_dns.py:
- maintainers: mgruener
- labels: cloudflare_dns
- $modules/net_tools/dnsimple.py:
- maintainers: drcapulet
- $modules/net_tools/dnsimple_info.py:
- maintainers: edhilgendorf
- $modules/net_tools/dnsmadeeasy.py:
- maintainers: briceburg
- $modules/net_tools/gandi_livedns.py:
- maintainers: gthiemonge
- $modules/net_tools/haproxy.py:
- maintainers: ravibhure Normo
- $modules/net_tools/infinity/infinity.py:
- maintainers: MeganLiu
- $modules/net_tools/ip_netns.py:
- maintainers: bregman-arie
- $modules/net_tools/ipify_facts.py:
- maintainers: resmo
- $modules/net_tools/ipinfoio_facts.py:
- maintainers: akostyuk
- $modules/net_tools/ipwcli_dns.py:
- maintainers: cwollinger
- $modules/net_tools/ldap/ldap_attrs.py:
- maintainers: drybjed jtyr noles
- $modules/net_tools/ldap/ldap_entry.py:
- maintainers: jtyr
- $modules/net_tools/ldap/ldap_passwd.py:
- maintainers: KellerFuchs jtyr
- $modules/net_tools/ldap/ldap_search.py:
- maintainers: eryx12o45 jtyr
- $modules/net_tools/lldp.py:
- labels: lldp
- ignore: andyhky
- $modules/net_tools/netcup_dns.py:
- maintainers: nbuchwitz
- $modules/net_tools/nsupdate.py:
- maintainers: nerzhul
- $modules/net_tools/omapi_host.py:
- maintainers: amasolov nerzhul
- $modules/net_tools/pritunl/:
- maintainers: Lowess
- $modules/net_tools/nmcli.py:
- maintainers: alcamie101
- $modules/net_tools/snmp_facts.py:
- maintainers: ogenstad ujwalkomarla
- $modules/notification/bearychat.py:
- maintainers: tonyseek
- $modules/notification/campfire.py:
- maintainers: fabulops
- $modules/notification/catapult.py:
- maintainers: Jmainguy
- $modules/notification/cisco_webex.py:
- maintainers: drew-russell
- $modules/notification/discord.py:
- maintainers: cwollinger
- $modules/notification/flowdock.py:
- ignore: mcodd
- $modules/notification/grove.py:
- maintainers: zimbatm
- $modules/notification/hipchat.py:
- maintainers: pb8226 shirou
- $modules/notification/irc.py:
- maintainers: jpmens sivel
- $modules/notification/jabber.py:
- maintainers: bcoca
- $modules/notification/logentries_msg.py:
- maintainers: jcftang
- $modules/notification/mail.py:
- maintainers: dagwieers
- $modules/notification/matrix.py:
- maintainers: jcgruenhage
- $modules/notification/mattermost.py:
- maintainers: bjolivot
- $modules/notification/mqtt.py:
- maintainers: jpmens
- $modules/notification/nexmo.py:
- maintainers: sivel
- $modules/notification/office_365_connector_card.py:
- maintainers: marc-sensenich
- $modules/notification/pushbullet.py:
- maintainers: willybarro
- $modules/notification/pushover.py:
- maintainers: weaselkeeper wopfel
- $modules/notification/rocketchat.py:
- maintainers: Deepakkothandan
- labels: rocketchat
- ignore: ramondelafuente
- $modules/notification/say.py:
- maintainers: $team_ansible_core mpdehaan
- $modules/notification/sendgrid.py:
- maintainers: makaimc
- $modules/notification/slack.py:
- maintainers: ramondelafuente
- $modules/notification/syslogger.py:
- maintainers: garbled1
- $modules/notification/telegram.py:
- maintainers: tyouxa loms lomserman
- $modules/notification/twilio.py:
- maintainers: makaimc
- $modules/notification/typetalk.py:
- maintainers: tksmd
- $modules/packaging/language/ansible_galaxy_install.py:
- maintainers: russoz
- $modules/packaging/language/bower.py:
- maintainers: mwarkentin
- $modules/packaging/language/bundler.py:
- maintainers: thoiberg
- $modules/packaging/language/cargo.py:
- maintainers: radek-sprta
- $modules/packaging/language/composer.py:
- maintainers: dmtrs
- ignore: resmo
- $modules/packaging/language/cpanm.py:
- maintainers: fcuny russoz
- $modules/packaging/language/easy_install.py:
- maintainers: mattupstate
- $modules/packaging/language/gem.py:
- maintainers: $team_ansible_core johanwiren
- labels: gem
- $modules/packaging/language/maven_artifact.py:
- maintainers: tumbl3w33d turb
- labels: maven_artifact
- ignore: chrisisbeef
- $modules/packaging/language/npm.py:
- maintainers: shane-walker xcambar
- labels: npm
- ignore: chrishoffman
- $modules/packaging/language/pear.py:
- labels: pear
- ignore: jle64
- $modules/packaging/language/pip_package_info.py:
- maintainers: bcoca matburt maxamillion
- $modules/packaging/language/pipx.py:
- maintainers: russoz
- $modules/packaging/language/yarn.py:
- maintainers: chrishoffman verkaufer
- $modules/packaging/os/apk.py:
- maintainers: tdtrask
- labels: apk
- ignore: kbrebanov
- $modules/packaging/os/apt_repo.py:
- maintainers: obirvalger
- $modules/packaging/os/apt_rpm.py:
- maintainers: evgkrsk
- $modules/packaging/os/copr.py:
- maintainers: schlupov
- $modules/packaging/os/dnf_versionlock.py:
- maintainers: moreda
- $modules/packaging/os/flatpak.py:
- maintainers: $team_flatpak
- $modules/packaging/os/flatpak_remote.py:
- maintainers: $team_flatpak
- $modules/packaging/os/pkg5:
- maintainers: $team_solaris mavit
- labels: pkg5 solaris
- keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
- $modules/packaging/os/homebrew.py:
- notify: chris-short
- maintainers: $team_macos andrew-d
- labels: homebrew macos
- ignore: ryansb
- keywords: brew cask darwin homebrew macosx macports osx
- $modules/packaging/os/homebrew_cask.py:
- notify: chris-short
- maintainers: $team_macos enriclluelles
- labels: homebrew_ macos
- ignore: ryansb
- keywords: brew cask darwin homebrew macosx macports osx
- $modules/packaging/os/homebrew_tap.py:
- notify: chris-short
- maintainers: $team_macos
- labels: homebrew_ macos
- ignore: ryansb
- keywords: brew cask darwin homebrew macosx macports osx
- $modules/packaging/os/installp.py:
- maintainers: $team_aix kairoaraujo
- labels: aix installp
+ maintainers: phumpal
+ $modules/aix:
keywords: aix efix lpar wpar
- $modules/packaging/os/layman.py:
- maintainers: jirutka
- $modules/packaging/os/macports.py:
- notify: chris-short
- maintainers: $team_macos jcftang
- labels: macos macports
- ignore: ryansb
- keywords: brew cask darwin homebrew macosx macports osx
- $modules/packaging/os/mas.py:
- maintainers: lukasbestle mheap
- $modules/packaging/os/openbsd_pkg.py:
- maintainers: $team_bsd eest
- labels: bsd openbsd_pkg
- ignore: ryansb
- keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
- $modules/packaging/os/opkg.py:
- maintainers: skinp
- $modules/packaging/os/pacman.py:
- maintainers: elasticdog indrajitr tchernomax jraby
- labels: pacman
- ignore: elasticdog
- $modules/packaging/os/pacman_key.py:
- maintainers: grawlinson
- labels: pacman
- $modules/packaging/os/pkgin.py:
- maintainers: $team_solaris L2G jasperla szinck martinm82
- labels: pkgin solaris
- $modules/packaging/os/pkgng.py:
- maintainers: $team_bsd bleader
- labels: bsd pkgng
- ignore: bleader
- keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
- $modules/packaging/os/pkgutil.py:
- maintainers: $team_solaris dermute
- labels: pkgutil solaris
- $modules/packaging/os/portage.py:
- maintainers: Tatsh wltjr
- labels: portage
- ignore: sayap
- $modules/packaging/os/portinstall.py:
- maintainers: $team_bsd berenddeboer
- labels: bsd portinstall
- ignore: ryansb
- keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
- $modules/packaging/os/pulp_repo.py:
- maintainers: sysadmind
- $modules/packaging/os/redhat_subscription.py:
- maintainers: barnabycourt alikins kahowell
- labels: redhat_subscription
- $modules/packaging/os/rhn_channel.py:
- maintainers: vincentvdk alikins $team_rhn
- labels: rhn_channel
- $modules/packaging/os/rhn_register.py:
- maintainers: jlaska $team_rhn
- labels: rhn_register
- $modules/packaging/os/rhsm_release.py:
- maintainers: seandst
- $modules/packaging/os/rhsm_repository.py:
- maintainers: giovannisciortino
- $modules/packaging/os/rpm_ostree_pkg.py:
- maintainers: dustymabe Akasurde
- $modules/packaging/os/slackpkg.py:
- maintainers: KimNorgaard
- $modules/packaging/os/snap.py:
- maintainers: angristan vcarceler
- labels: snap
- $modules/packaging/os/snap_alias.py:
- maintainers: russoz
- labels: snap
- $modules/packaging/os/sorcery.py:
- maintainers: vaygr
- $modules/packaging/os/svr4pkg.py:
- maintainers: $team_solaris brontitall
- labels: solaris svr4pkg
- $modules/packaging/os/swdepot.py:
- maintainers: $team_hpux melodous
- labels: hpux swdepot
- keywords: hp-ux
- $modules/packaging/os/swupd.py:
- maintainers: hnanni albertomurillo
- labels: swupd
- $modules/packaging/os/urpmi.py:
- maintainers: pmakowski
- $modules/packaging/os/xbps.py:
- maintainers: dinoocch the-maldridge
- $modules/packaging/os/yum_versionlock.py:
- maintainers: gyptazy aminvakil
- $modules/packaging/os/zypper.py:
- maintainers: $team_suse
- labels: zypper
- ignore: dirtyharrycallahan robinro
- $modules/packaging/os/zypper_repository.py:
- maintainers: $team_suse
- labels: zypper
- ignore: matze
- $modules/remote_management/cobbler/:
- maintainers: dagwieers
- $modules/remote_management/hpilo/:
- maintainers: haad
- ignore: dagwieers
- $modules/remote_management/imc/imc_rest.py:
- maintainers: dagwieers
- labels: cisco
- $modules/remote_management/ipmi/:
- maintainers: bgaifullin cloudnull
- $modules/remote_management/lenovoxcc/:
- maintainers: panyy3 renxulei
- $modules/remote_management/lxca/:
- maintainers: navalkp prabhosa
- $modules/remote_management/manageiq/:
- labels: manageiq
- maintainers: $team_manageiq
- $modules/remote_management/manageiq/manageiq_alert_profiles.py:
- maintainers: elad661
- $modules/remote_management/manageiq/manageiq_alerts.py:
- maintainers: elad661
- $modules/remote_management/manageiq/manageiq_group.py:
- maintainers: evertmulder
- $modules/remote_management/manageiq/manageiq_tenant.py:
- maintainers: evertmulder
- $modules/remote_management/oneview/:
- maintainers: adriane-cardozo fgbulsoni tmiotto
- $modules/remote_management/oneview/oneview_datacenter_info.py:
- maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr
- $modules/remote_management/oneview/oneview_fc_network.py:
- maintainers: fgbulsoni
- $modules/remote_management/oneview/oneview_fcoe_network.py:
- maintainers: fgbulsoni
- $modules/remote_management/redfish/:
- maintainers: $team_redfish
- ignore: jose-delarosa
- $modules/remote_management/redfish/wdc_redfish_command.py:
- maintainers: $team_wdc
- $modules/remote_management/redfish/wdc_redfish_info.py:
- maintainers: $team_wdc
- $modules/remote_management/stacki/stacki_host.py:
- maintainers: bsanders bbyhuy
- labels: stacki_host
- $modules/remote_management/wakeonlan.py:
- maintainers: dagwieers
- $modules/source_control/bitbucket/:
- maintainers: catcombo
- $modules/source_control/bzr.py:
- maintainers: andreparames
- $modules/source_control/git_config.py:
- maintainers: djmattyg007 mgedmin
- $modules/source_control/github/github_deploy_key.py:
- maintainers: bincyber
- $modules/source_control/github/github_issue.py:
- maintainers: Akasurde
- $modules/source_control/github/github_key.py:
- maintainers: erydo
- labels: github_key
- ignore: erydo
- $modules/source_control/github/github_release.py:
- maintainers: adrianmoisey
- $modules/source_control/github/github_repo.py:
- maintainers: atorrescogollo
- $modules/source_control/github/:
- maintainers: stpierre
- $modules/source_control/gitlab/:
- notify: jlozadad
- maintainers: $team_gitlab
- keywords: gitlab source_control
- $modules/source_control/gitlab/gitlab_project_variable.py:
- maintainers: markuman
- $modules/source_control/gitlab/gitlab_runner.py:
- maintainers: SamyCoenen
- $modules/source_control/gitlab/gitlab_user.py:
- maintainers: LennertMertens stgrace
- $modules/source_control/gitlab/gitlab_branch.py:
- maintainers: paytroff
- $modules/source_control/hg.py:
- maintainers: yeukhon
- $modules/storage/emc/emc_vnx_sg_member.py:
- maintainers: remixtj
- $modules/storage/hpe3par/ss_3par_cpg.py:
- maintainers: farhan7500 gautamphegde
- $modules/storage/ibm/:
- maintainers: tzure
- $modules/storage/pmem/pmem.py:
- maintainers: mizumm
- $modules/storage/vexata/:
- maintainers: vexata
- $modules/storage/zfs/:
- maintainers: $team_solaris
- labels: solaris
- keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
- $modules/storage/zfs/zfs.py:
- maintainers: johanwiren
- $modules/storage/zfs/zfs_delegate_admin.py:
- maintainers: natefoo
- $modules/system/aix:
- maintainers: $team_aix
labels: aix
- keywords: aix efix lpar wpar
- $modules/system/alternatives.py:
- maintainers: mulby
- labels: alternatives
- ignore: DavidWittman jiuka
- $modules/system/aix_lvol.py:
- maintainers: adejoux
- $modules/system/awall.py:
- maintainers: tdtrask
- $modules/system/beadm.py:
- maintainers: $team_solaris
- labels: beadm solaris
- keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
- $modules/system/capabilities.py:
- maintainers: natefoo
- $modules/system/cronvar.py:
- maintainers: dougluce
- $modules/system/crypttab.py:
- maintainers: groks
- $modules/system/dconf.py:
- maintainers: azaghal
- $modules/system/dpkg_divert.py:
- maintainers: quidame
- $modules/system/facter.py:
- maintainers: $team_ansible_core gamethis
- labels: facter
- $modules/system/filesystem.py:
- maintainers: pilou- abulimov quidame
- labels: filesystem
- $modules/system/gconftool2.py:
- maintainers: Akasurde kevensen
- labels: gconftool2
- $modules/system/gconftool2_info.py:
- maintainers: russoz
- labels: gconftool2
- $modules/system/homectl.py:
- maintainers: jameslivulpi
- $modules/system/interfaces_file.py:
- maintainers: obourdon hryamzik
- labels: interfaces_file
- $modules/system/iptables_state.py:
- maintainers: quidame
- $modules/system/keyring.py:
- maintainers: ahussey-redhat
- $modules/system/keyring_info.py:
- maintainers: ahussey-redhat
- $modules/system/shutdown.py:
- maintainers: nitzmahone samdoran aminvakil
- $modules/system/java_cert.py:
- maintainers: haad absynth76
- $modules/system/java_keystore.py:
- maintainers: Mogztter quidame
- $modules/system/kernel_blacklist.py:
- maintainers: matze
- $modules/system/launchd.py:
- maintainers: martinm82
- $modules/system/lbu.py:
- maintainers: kunkku
- $modules/system/listen_ports_facts.py:
- maintainers: ndavison
- $modules/system/locale_gen.py:
- maintainers: AugustusKling
- $modules/system/lvg.py:
- maintainers: abulimov
- $modules/system/lvol.py:
- maintainers: abulimov jhoekx zigaSRC unkaputtbar112
- $modules/system/make.py:
- maintainers: LinusU
- $modules/system/mksysb.py:
maintainers: $team_aix
- labels: aix mksysb
- $modules/system/modprobe.py:
- maintainers: jdauphant mattjeffery
- labels: modprobe
- ignore: stygstra
- $modules/system/nosh.py:
- maintainers: tacatac
- $modules/system/ohai.py:
- maintainers: $team_ansible_core mpdehaan
- labels: ohai
- $modules/system/open_iscsi.py:
- maintainers: srvg
- $modules/system/openwrt_init.py:
- maintainers: agaffney
- $modules/system/osx_defaults.py:
- notify: chris-short
- maintainers: $team_macos notok
- labels: macos osx_defaults
- keywords: brew cask darwin homebrew macosx macports osx
- $modules/system/pam_limits.py:
- maintainers: giovannisciortino
- labels: pam_limits
- ignore: usawa
- $modules/system/pamd.py:
- maintainers: kevensen
- $modules/system/parted.py:
- maintainers: ColOfAbRiX rosowiecki jake2184
- $modules/system/pids.py:
- maintainers: saranyasridharan
- $modules/system/puppet.py:
- maintainers: nibalizer emonty
- labels: puppet
- $modules/system/python_requirements_info.py:
- maintainers: willthames
- ignore: ryansb
- $modules/system/runit.py:
- maintainers: jsumners
- $modules/system/sap_task_list_execute:
- maintainers: rainerleber
- $modules/system/sefcontext.py:
- maintainers: dagwieers
- $modules/system/selinux_permissive.py:
- maintainers: mscherer
- $modules/system/selogin.py:
- maintainers: bachradsusi dankeder jamescassell
- $modules/system/seport.py:
- maintainers: dankeder
- $modules/system/solaris_zone.py:
- maintainers: $team_solaris pmarkham
- labels: solaris
- keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
- $modules/system/ssh_config.py:
- maintainers: gaqzi Akasurde
- $modules/system/sudoers.py:
- maintainers: JonEllis
- $modules/system/svc.py:
- maintainers: bcoca
- $modules/system/syspatch.py:
- maintainers: precurse
- $modules/system/sysrc.py:
- maintainers: dlundgren
- $modules/system/sysupgrade.py:
- maintainers: precurse
- $modules/system/timezone.py:
- maintainers: indrajitr jasperla tmshn
- $modules/system/ufw.py:
- notify: felixfontein
- maintainers: ahtik ovcharenko pyykkis
- labels: ufw
- $modules/system/vdo.py:
- maintainers: rhawalsh bgurney-rh
- $modules/system/xfconf.py:
- maintainers: russoz jbenden
- labels: xfconf
- $modules/system/xfconf_info.py:
+ $modules/aix_lvol.py:
+ maintainers: adejoux
+ $modules/alerta_customer.py:
+ maintainers: cwollinger
+ $modules/ali_:
+ maintainers: xiaozhu36
+ $modules/alternatives.py:
+ ignore: DavidWittman jiuka
+ labels: alternatives
+ maintainers: mulby
+ $modules/android_sdk.py:
+ maintainers: shamilovstas
+ $modules/ansible_galaxy_install.py:
maintainers: russoz
- labels: xfconf
- $modules/system/xfs_quota.py:
- maintainers: bushvin
- $modules/web_infrastructure/apache2_mod_proxy.py:
+ $modules/apache2_mod_proxy.py:
maintainers: oboukili
- $modules/web_infrastructure/apache2_module.py:
- maintainers: berendt n0trax
+ $modules/apache2_module.py:
ignore: robinro
- $modules/web_infrastructure/deploy_helper.py:
+ maintainers: berendt n0trax
+ $modules/apk.py:
+ ignore: kbrebanov
+ labels: apk
+ maintainers: tdtrask
+ $modules/apt_repo.py:
+ maintainers: obirvalger
+ $modules/apt_rpm.py:
+ maintainers: evgkrsk
+ $modules/archive.py:
+ maintainers: bendoh
+ $modules/atomic_:
+ maintainers: krsacme
+ $modules/atomic_container.py:
+ maintainers: giuseppe krsacme
+ $modules/awall.py:
+ maintainers: tdtrask
+ $modules/beadm.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: beadm solaris
+ maintainers: $team_solaris
+ $modules/bigpanda.py:
+ ignore: hkariti
+ $modules/bitbucket_:
+ maintainers: catcombo
+ $modules/bootc_manage.py:
+ maintainers: cooktheryan
+ $modules/bower.py:
+ maintainers: mwarkentin
+ $modules/btrfs_:
+ maintainers: gnfzdz
+ $modules/bundler.py:
+ maintainers: thoiberg
+ $modules/bzr.py:
+ maintainers: andreparames
+ $modules/campfire.py:
+ maintainers: fabulops
+ $modules/capabilities.py:
+ maintainers: natefoo
+ $modules/cargo.py:
+ maintainers: radek-sprta
+ $modules/catapult.py:
+ maintainers: Jmainguy
+ $modules/circonus_annotation.py:
+ maintainers: NickatEpic
+ $modules/cisco_webex.py:
+ maintainers: drew-russell
+ $modules/cloud_init_data_facts.py:
+ maintainers: resmo
+ $modules/cloudflare_dns.py:
+ labels: cloudflare_dns
+ maintainers: mgruener
+ $modules/cobbler_:
+ maintainers: dagwieers
+ $modules/composer.py:
+ ignore: resmo
+ maintainers: dmtrs
+ $modules/consul:
+ ignore: colin-nolan Hakon
+ maintainers: $team_consul
+ $modules/copr.py:
+ maintainers: schlupov
+ $modules/cpanm.py:
+ maintainers: fcuny russoz
+ $modules/cronvar.py:
+ maintainers: dougluce
+ $modules/crypttab.py:
+ maintainers: groks
+ $modules/datadog_downtime.py:
+ maintainers: Datadog
+ $modules/datadog_event.py:
+ ignore: arturaz
+ labels: datadog_event
+ maintainers: n0ts
+ $modules/datadog_monitor.py:
+ ignore: skornehl
+ $modules/dconf.py:
+ maintainers: azaghal
+ $modules/decompress.py:
+ maintainers: shamilovstas
+ $modules/deploy_helper.py:
maintainers: ramondelafuente
- $modules/web_infrastructure/django_manage.py:
+ $modules/dimensiondata_network.py:
+ labels: dimensiondata_network
+ maintainers: aimonb tintoy
+ $modules/dimensiondata_vlan.py:
+ maintainers: tintoy
+ $modules/discord.py:
+ maintainers: cwollinger
+ $modules/django_check.py:
maintainers: russoz
+ $modules/django_command.py:
+ maintainers: russoz
+ $modules/django_createcachetable.py:
+ maintainers: russoz
+ $modules/django_dumpdata.py:
+ maintainers: russoz
+ $modules/django_loaddata.py:
+ maintainers: russoz
+ $modules/django_manage.py:
ignore: scottanderson42 tastychutney
labels: django_manage
- $modules/web_infrastructure/ejabberd_user.py:
+ maintainers: russoz
+ $modules/dnf_versionlock.py:
+ maintainers: moreda
+ $modules/dnf_config_manager.py:
+ maintainers: ahyattdev
+ $modules/dnsimple.py:
+ maintainers: drcapulet
+ $modules/dnsimple_info.py:
+ maintainers: edhilgendorf
+ $modules/dnsmadeeasy.py:
+ maintainers: briceburg
+ $modules/dpkg_divert.py:
+ maintainers: quidame
+ $modules/easy_install.py:
+ maintainers: mattupstate
+ $modules/ejabberd_user.py:
maintainers: privateip
- $modules/web_infrastructure/gunicorn.py:
- maintainers: agmezr
- $modules/web_infrastructure/htpasswd.py:
- maintainers: $team_ansible_core
- labels: htpasswd
- $modules/web_infrastructure/jboss.py:
- maintainers: $team_jboss jhoekx
- labels: jboss
- $modules/web_infrastructure/jenkins_build.py:
- maintainers: brettmilford unnecessary-username
- $modules/web_infrastructure/jenkins_job.py:
- maintainers: sermilrod
- $modules/web_infrastructure/jenkins_job_info.py:
+ $modules/elasticsearch_plugin.py:
+ maintainers: ThePixelDeveloper samdoran
+ $modules/emc_vnx_sg_member.py:
+ maintainers: remixtj
+ $modules/etcd3.py:
+ ignore: vfauth
+ maintainers: evrardjp
+ $modules/facter_facts.py:
+ labels: facter
+ maintainers: russoz $team_ansible_core gamethis
+ $modules/filesize.py:
+ maintainers: quidame
+ $modules/filesystem.py:
+ labels: filesystem
+ maintainers: pilou- abulimov quidame
+ $modules/flatpak.py:
+ maintainers: $team_flatpak
+ $modules/flatpak_remote.py:
+ maintainers: $team_flatpak
+ $modules/gandi_livedns.py:
+ maintainers: gthiemonge
+ $modules/gconftool2.py:
+ labels: gconftool2
+ maintainers: Akasurde kevensen
+ $modules/gconftool2_info.py:
+ labels: gconftool2
+ maintainers: russoz
+ $modules/gem.py:
+ labels: gem
+ maintainers: $team_ansible_core johanwiren
+ $modules/gio_mime.py:
+ maintainers: russoz
+ $modules/git_config.py:
+ maintainers: djmattyg007 mgedmin
+ $modules/git_config_info.py:
+ maintainers: guenhter
+ $modules/github_:
maintainers: stpierre
- $modules/web_infrastructure/jenkins_plugin.py:
- maintainers: jtyr
- $modules/web_infrastructure/jenkins_script.py:
- maintainers: hogarthj
- $modules/web_infrastructure/jira.py:
- maintainers: Slezhuk tarka pertoft
- ignore: DWSR
- labels: jira
- $modules/web_infrastructure/nginx_status_info.py:
+ $modules/github_deploy_key.py:
+ maintainers: bincyber
+ $modules/github_issue.py:
+ maintainers: Akasurde
+ $modules/github_key.py:
+ ignore: erydo
+ labels: github_key
+ maintainers: erydo
+ $modules/github_release.py:
+ maintainers: adrianmoisey
+ $modules/github_repo.py:
+ maintainers: atorrescogollo
+ $modules/gitlab_:
+ keywords: gitlab source_control
+ maintainers: $team_gitlab
+ notify: jlozadad
+ ignore: dj-wasabi
+ $modules/gitlab_branch.py:
+ maintainers: paytroff
+ $modules/gitlab_issue.py:
+ maintainers: zvaraondrej
+ $modules/gitlab_label.py:
+ maintainers: gpongelli
+ $modules/gitlab_merge_request.py:
+ maintainers: zvaraondrej
+ $modules/gitlab_milestone.py:
+ maintainers: gpongelli
+ $modules/gitlab_project_variable.py:
+ maintainers: markuman
+ $modules/gitlab_instance_variable.py:
+ maintainers: benibr
+ $modules/gitlab_runner.py:
+ maintainers: SamyCoenen
+ $modules/gitlab_user.py:
+ maintainers: LennertMertens stgrace
+ $modules/gitlab_group_access_token.py:
+ maintainers: pixslx
+ $modules/gitlab_project_access_token.py:
+ maintainers: pixslx
+ $modules/grove.py:
+ maintainers: zimbatm
+ $modules/gunicorn.py:
+ maintainers: agmezr
+ $modules/haproxy.py:
+ maintainers: ravibhure Normo
+ $modules/heroku_collaborator.py:
+ maintainers: marns93
+ $modules/hg.py:
+ maintainers: yeukhon
+ $modules/homebrew.py:
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ labels: homebrew macos
+ maintainers: $team_macos andrew-d
+ notify: chris-short
+ $modules/homebrew_cask.py:
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ labels: homebrew_ macos
+ maintainers: $team_macos enriclluelles
+ notify: chris-short
+ $modules/homebrew_tap.py:
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ labels: homebrew_ macos
+ maintainers: $team_macos
+ notify: chris-short
+ $modules/homebrew_services.py:
+ ignore: ryansb
+ keywords: brew cask services darwin homebrew macosx macports osx
+ labels: homebrew_ macos
+ maintainers: $team_macos kitizz
+ $modules/homectl.py:
+ maintainers: jameslivulpi
+ $modules/honeybadger_deployment.py:
+ maintainers: stympy
+ $modules/hpilo_:
+ ignore: dagwieers
+ maintainers: haad
+ $modules/hponcfg.py:
+ ignore: dagwieers
+ maintainers: haad
+ $modules/htpasswd.py:
+ labels: htpasswd
+ maintainers: $team_ansible_core
+ $modules/hwc_:
+ keywords: cloud huawei hwc
+ maintainers: $team_huawei huaweicloud
+ $modules/ibm_sa_:
+ maintainers: tzure
+ $modules/icinga2_feature.py:
+ maintainers: nerzhul
+ $modules/icinga2_host.py:
+ maintainers: t794104
+ $modules/idrac_:
+ ignore: jose-delarosa
+ maintainers: $team_redfish
+ $modules/ilo_:
+ ignore: jose-delarosa varini-hp
+ maintainers: $team_redfish
+ $modules/imc_rest.py:
+ labels: cisco
+ maintainers: dagwieers
+ $modules/imgadm.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris
+ $modules/infinity.py:
+ maintainers: MeganLiu
+ $modules/influxdb_:
+ maintainers: kamsz
+ $modules/influxdb_query.py:
maintainers: resmo
- $modules/web_infrastructure/rundeck_acl_policy.py:
+ $modules/influxdb_user.py:
+ maintainers: zhhuta
+ $modules/influxdb_write.py:
+ maintainers: resmo
+ $modules/ini_file.py:
+ maintainers: jpmens noseka1
+ $modules/installp.py:
+ keywords: aix efix lpar wpar
+ labels: aix installp
+ maintainers: $team_aix kairoaraujo
+ $modules/interfaces_file.py:
+ labels: interfaces_file
+ maintainers: obourdon hryamzik
+ $modules/ip_netns.py:
+ maintainers: bregman-arie
+ $modules/ipa_:
+ maintainers: $team_ipa
+ ignore: fxfitz
+ $modules/ipa_getkeytab.py:
+ maintainers: abakanovskii
+ $modules/ipa_dnsrecord.py:
+ maintainers: $team_ipa jwbernin
+ $modules/ipbase_info.py:
+ maintainers: dominikkukacka
+ $modules/ipa_pwpolicy.py:
+ maintainers: adralioh
+ $modules/ipa_service.py:
+ maintainers: cprh
+ $modules/ipa_vault.py:
+ maintainers: jparrill
+ $modules/ipify_facts.py:
+ maintainers: resmo
+ $modules/ipinfoio_facts.py:
+ maintainers: akostyuk
+ $modules/ipmi_:
+ maintainers: bgaifullin cloudnull
+ $modules/iptables_state.py:
+ maintainers: quidame
+ $modules/ipwcli_dns.py:
+ maintainers: cwollinger
+ $modules/irc.py:
+ maintainers: jpmens sivel
+ $modules/iso_create.py:
+ maintainers: Tomorrow9
+ $modules/iso_customize.py:
+ maintainers: ZouYuhua
+ $modules/iso_extract.py:
+ maintainers: dagwieers jhoekx ribbons
+ $modules/jabber.py:
+ maintainers: bcoca
+ $modules/java_cert.py:
+ maintainers: haad absynth76
+ $modules/java_keystore.py:
+ maintainers: Mogztter quidame
+ $modules/jboss.py:
+ labels: jboss
+ maintainers: $team_jboss jhoekx
+ $modules/jenkins_build.py:
+ maintainers: brettmilford unnecessary-username juanmcasanova
+ $modules/jenkins_build_info.py:
+ maintainers: juanmcasanova
+ $modules/jenkins_credential.py:
+ maintainers: YoussefKhalidAli
+ $modules/jenkins_job.py:
+ maintainers: sermilrod
+ $modules/jenkins_job_info.py:
+ maintainers: stpierre
+ $modules/jenkins_node.py:
+ maintainers: phyrwork
+ $modules/jenkins_plugin.py:
+ maintainers: jtyr
+ $modules/jenkins_script.py:
+ maintainers: hogarthj
+ $modules/jira.py:
+ ignore: DWSR tarka
+ labels: jira
+ maintainers: Slezhuk pertoft
+ $modules/kdeconfig.py:
+ maintainers: smeso
+ $modules/kernel_blacklist.py:
+ maintainers: matze
+ $modules/keycloak_:
+ maintainers: $team_keycloak
+ $modules/keycloak_authentication.py:
+ maintainers: elfelip Gaetan2907
+ $modules/keycloak_authentication_required_actions.py:
+ maintainers: Skrekulko
+ $modules/keycloak_authz_authorization_scope.py:
+ maintainers: mattock
+ $modules/keycloak_authz_permission.py:
+ maintainers: mattock
+ $modules/keycloak_authz_custom_policy.py:
+ maintainers: mattock
+ $modules/keycloak_authz_permission_info.py:
+ maintainers: mattock
+ $modules/keycloak_client_rolemapping.py:
+ maintainers: Gaetan2907
+ $modules/keycloak_clientscope.py:
+ maintainers: Gaetan2907
+ $modules/keycloak_clientscope_type.py:
+ maintainers: simonpahl
+ $modules/keycloak_clientsecret_info.py:
+ maintainers: fynncfchen johncant
+ $modules/keycloak_clientsecret_regenerate.py:
+ maintainers: fynncfchen johncant
+ $modules/keycloak_component.py:
+ maintainers: fivetide
+ $modules/keycloak_group.py:
+ maintainers: adamgoossens
+ $modules/keycloak_identity_provider.py:
+ maintainers: laurpaum
+ $modules/keycloak_realm.py:
+ maintainers: kris2kris
+ $modules/keycloak_realm_info.py:
+ maintainers: fynncfchen
+ $modules/keycloak_realm_key.py:
+ maintainers: mattock
+ $modules/keycloak_role.py:
+ maintainers: laurpaum
+ $modules/keycloak_user.py:
+ maintainers: elfelip
+ $modules/keycloak_user_federation.py:
+ maintainers: laurpaum
+ $modules/keycloak_userprofile.py:
+ maintainers: yeoldegrove
+ $modules/keycloak_component_info.py:
+ maintainers: desand01
+ $modules/keycloak_client_rolescope.py:
+ maintainers: desand01
+ $modules/keycloak_user_rolemapping.py:
+ maintainers: bratwurzt
+ $modules/keycloak_realm_rolemapping.py:
+ maintainers: agross mhuysamen Gaetan2907
+ $modules/keyring.py:
+ maintainers: ahussey-redhat
+ $modules/keyring_info.py:
+ maintainers: ahussey-redhat
+ $modules/kibana_plugin.py:
+ maintainers: barryib
+ $modules/krb_ticket.py:
+ maintainers: abakanovskii
+ $modules/launchd.py:
+ maintainers: martinm82
+ $modules/layman.py:
+ maintainers: jirutka
+ $modules/lbu.py:
+ maintainers: kunkku
+ $modules/ldap_attrs.py:
+ maintainers: drybjed jtyr noles
+ $modules/ldap_entry.py:
+ maintainers: jtyr
+ $modules/ldap_inc.py:
+ maintainers: pduveau
+ $modules/ldap_passwd.py:
+ maintainers: KellerFuchs jtyr
+ $modules/ldap_search.py:
+ maintainers: eryx12o45 jtyr
+ $modules/librato_annotation.py:
+ maintainers: Sedward
+ $modules/linode:
+ maintainers: $team_linode
+ $modules/linode.py:
+ maintainers: zbal
+ $modules/listen_ports_facts.py:
+ maintainers: ndavison
+ $modules/lldp.py:
+ ignore: andyhky
+ labels: lldp
+ $modules/locale_gen.py:
+ maintainers: AugustusKling
+ $modules/logentries.py:
+ ignore: ivanvanderbyl
+ labels: logentries
+ $modules/logentries_msg.py:
+ maintainers: jcftang
+ $modules/logstash_plugin.py:
maintainers: nerzhul
- $modules/web_infrastructure/rundeck_project.py:
+ $modules/lvg.py:
+ maintainers: abulimov
+ $modules/lvm_pv.py:
+ maintainers: klention
+ $modules/lvm_pv_move_data.py:
+ maintainers: klention
+ $modules/lvg_rename.py:
+ maintainers: lszomor
+ $modules/lvol.py:
+ maintainers: abulimov jhoekx zigaSRC unkaputtbar112
+ $modules/lxc_container.py:
+ maintainers: cloudnull
+ $modules/lxca_:
+ maintainers: navalkp prabhosa
+ $modules/lxd_:
+ ignore: hnakamur
+ $modules/lxd_profile.py:
+ maintainers: conloos
+ $modules/lxd_project.py:
+ maintainers: we10710aa
+ $modules/macports.py:
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ labels: macos macports
+ maintainers: $team_macos jcftang
+ notify: chris-short
+ $modules/mail.py:
+ maintainers: dagwieers
+ $modules/make.py:
+ maintainers: LinusU
+ $modules/manageiq_:
+ labels: manageiq
+ maintainers: $team_manageiq
+ $modules/manageiq_alert_profiles.py:
+ maintainers: elad661
+ $modules/manageiq_alerts.py:
+ maintainers: elad661
+ $modules/manageiq_group.py:
+ maintainers: evertmulder
+ $modules/manageiq_policies_info.py:
+ maintainers: russoz $team_manageiq
+ $modules/manageiq_tags_info.py:
+ maintainers: russoz $team_manageiq
+ $modules/manageiq_tenant.py:
+ maintainers: evertmulder
+ $modules/mas.py:
+ maintainers: lukasbestle mheap
+ $modules/matrix.py:
+ maintainers: jcgruenhage
+ $modules/mattermost.py:
+ maintainers: bjolivot
+ $modules/maven_artifact.py:
+ ignore: chrisisbeef
+ labels: maven_artifact
+ maintainers: tumbl3w33d turb
+ $modules/memset_:
+ ignore: glitchcrab
+ $modules/mksysb.py:
+ labels: aix mksysb
+ maintainers: $team_aix
+ $modules/modprobe.py:
+ ignore: stygstra
+ labels: modprobe
+ maintainers: jdauphant mattjeffery
+ $modules/monit.py:
+ labels: monit
+ maintainers: dstoflet brian-brazil snopoke
+ $modules/mqtt.py:
+ maintainers: jpmens
+ $modules/mssql_db.py:
+ labels: mssql_db
+ maintainers: vedit Jmainguy kenichi-ogawa-1988
+ $modules/mssql_script.py:
+ labels: mssql_script
+ maintainers: kbudde
+ $modules/nagios.py:
+ maintainers: tbielawa tgoetheyn
+ $modules/netcup_dns.py:
+ maintainers: nbuchwitz
+ $modules/newrelic_deployment.py:
+ ignore: mcodd
+ $modules/nexmo.py:
+ maintainers: sivel
+ $modules/nginx_status_info.py:
+ maintainers: resmo
+ $modules/nictagadm.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris SmithX10
+ $modules/nmcli.py:
+ maintainers: alcamie101
+ $modules/nomad_:
+ maintainers: chris93111 apecnascimento
+ $modules/nosh.py:
+ maintainers: tacatac
+ $modules/npm.py:
+ ignore: chrishoffman
+ labels: npm
+ maintainers: shane-walker xcambar
+ $modules/nsupdate.py:
maintainers: nerzhul
- $modules/web_infrastructure/rundeck_job_run.py:
+ $modules/ocapi_command.py:
+ maintainers: $team_wdc
+ $modules/ocapi_info.py:
+ maintainers: $team_wdc
+ $modules/oci_vcn.py:
+ maintainers: $team_oracle rohitChaware
+ $modules/odbc.py:
+ maintainers: john-westcott-iv
+ $modules/office_365_connector_card.py:
+ maintainers: marc-sensenich
+ $modules/ohai.py:
+ labels: ohai
+ maintainers: $team_ansible_core
+ ignore: mpdehaan
+ $modules/omapi_host.py:
+ maintainers: amasolov nerzhul
+ $modules/one_:
+ maintainers: $team_opennebula
+ $modules/one_host.py:
+ maintainers: rvalle
+ $modules/one_vnet.py:
+ maintainers: abakanovskii
+ $modules/oneandone_:
+ maintainers: aajdinov edevenport
+ $modules/onepassword_info.py:
+ maintainers: Rylon
+ $modules/oneview_:
+ maintainers: adriane-cardozo fgbulsoni tmiotto
+ $modules/oneview_datacenter_info.py:
+ maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr
+ $modules/oneview_fc_network.py:
+ maintainers: fgbulsoni
+ $modules/oneview_fcoe_network.py:
+ maintainers: fgbulsoni
+ $modules/online_:
+ maintainers: remyleone
+ $modules/open_iscsi.py:
+ maintainers: srvg
+ $modules/openbsd_pkg.py:
+ ignore: ryansb
+ keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
+ labels: bsd openbsd_pkg
+ maintainers: $team_bsd eest
+ $modules/opendj_backendprop.py:
+ maintainers: dj-wasabi
+ $modules/openwrt_init.py:
+ maintainers: agaffney
+ $modules/opkg.py:
+ maintainers: skinp
+ $modules/osx_defaults.py:
+ keywords: brew cask darwin homebrew macosx macports osx
+ labels: macos osx_defaults
+ maintainers: $team_macos notok
+ notify: chris-short
+ $modules/ovh_:
+ maintainers: pascalheraud
+ $modules/ovh_monthly_billing.py:
+ maintainers: fraff
+ $modules/pacemaker_cluster.py:
+ maintainers: matbu munchtoast
+ $modules/pacemaker_info.py:
+ maintainers: munchtoast
+ $modules/pacemaker_resource.py:
+ maintainers: munchtoast
+ $modules/pacemaker_stonith.py:
+ maintainers: munchtoast
+ $modules/packet_:
+ maintainers: nurfet-becirevic t0mk
+ $modules/packet_device.py:
+ maintainers: baldwinSPC t0mk teebes
+ $modules/packet_sshkey.py:
+ maintainers: t0mk
+ $modules/pacman.py:
+ ignore: elasticdog
+ labels: pacman
+ maintainers: elasticdog indrajitr tchernomax jraby
+ $modules/pacman_key.py:
+ labels: pacman
+ maintainers: grawlinson
+ $modules/pagerduty.py:
+ ignore: bpennypacker
+ labels: pagerduty
+ maintainers: suprememoocow thaumos
+ $modules/pagerduty_alert.py:
+ maintainers: ApsOps xshen1
+ $modules/pagerduty_change.py:
+ maintainers: adamvaughan
+ $modules/pagerduty_user.py:
+ maintainers: zanssa
+ $modules/pam_limits.py:
+ ignore: usawa
+ labels: pam_limits
+ maintainers: giovannisciortino
+ $modules/pamd.py:
+ maintainers: kevensen
+ $modules/parted.py:
+ maintainers: ColOfAbRiX jake2184
+ $modules/pear.py:
+ ignore: jle64
+ labels: pear
+ $modules/pids.py:
+ maintainers: saranyasridharan
+ $modules/pingdom.py:
+ maintainers: thaumos
+ $modules/pip_package_info.py:
+ maintainers: bcoca matburt maxamillion
+ $modules/pipx.py:
+ maintainers: russoz
+ $modules/pipx_info.py:
+ maintainers: russoz
+ $modules/pkg5:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: pkg5 solaris
+ maintainers: $team_solaris mavit
+ $modules/pkgin.py:
+ labels: pkgin solaris
+ maintainers: $team_solaris L2G jasperla szinck martinm82
+ $modules/pkgng.py:
+ ignore: bleader
+ keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
+ labels: bsd pkgng
+ maintainers: $team_bsd bleader
+ $modules/pkgutil.py:
+ labels: pkgutil solaris
+ maintainers: $team_solaris dermute
+ $modules/pmem.py:
+ maintainers: mizumm
+ $modules/pnpm.py:
+ ignore: chrishoffman
+ maintainers: aretrosen
+ $modules/portage.py:
+ ignore: sayap
+ labels: portage
+ maintainers: Tatsh wltjr
+ $modules/portinstall.py:
+ ignore: ryansb
+ keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
+ labels: bsd portinstall
+ maintainers: $team_bsd berenddeboer
+ $modules/pritunl_:
+ maintainers: Lowess
+ $modules/pubnub_blocks.py:
+ maintainers: parfeon pubnub
+ $modules/pulp_repo.py:
+ maintainers: sysadmind
+ $modules/puppet.py:
+ labels: puppet
+ maintainers: emonty
+ $modules/pushbullet.py:
+ maintainers: willybarro
+ $modules/pushover.py:
+ maintainers: weaselkeeper wopfel
+ $modules/python_requirements_info.py:
+ ignore: ryansb
+ maintainers: willthames
+ $modules/read_csv.py:
+ maintainers: dagwieers
+ $modules/redfish_:
+ ignore: jose-delarosa
+ maintainers: $team_redfish TSKushal
+ $modules/redhat_subscription.py:
+ labels: redhat_subscription
+ maintainers: $team_rhsm
+ ignore: barnabycourt alikins kahowell
+ $modules/redis.py:
+ maintainers: slok
+ $modules/redis_data.py:
+ maintainers: paginabianca
+ $modules/redis_data_incr.py:
+ maintainers: paginabianca
+ $modules/redis_data_info.py:
+ maintainers: paginabianca
+ $modules/redis_info.py:
+ maintainers: levonet
+ $modules/rhevm.py:
+ ignore: skvidal
+ keywords: kvm libvirt proxmox qemu
+ labels: rhevm virt
+ maintainers: $team_virt TimothyVandenbrande
+ $modules/rhsm_release.py:
+ maintainers: seandst $team_rhsm
+ $modules/rhsm_repository.py:
+ maintainers: giovannisciortino $team_rhsm
+ $modules/riak.py:
+ maintainers: drewkerrigan jsmartin
+ $modules/rocketchat.py:
+ ignore: ramondelafuente
+ labels: rocketchat
+ maintainers: Deepakkothandan
+ $modules/rollbar_deployment.py:
+ maintainers: kavu
+ $modules/rpm_ostree_pkg.py:
+ maintainers: dustymabe Akasurde
+ $modules/rundeck_acl_policy.py:
+ maintainers: nerzhul
+ $modules/rundeck_job_executions_info.py:
maintainers: phsmith
- $modules/web_infrastructure/rundeck_job_executions_info.py:
+ $modules/rundeck_job_run.py:
maintainers: phsmith
- $modules/web_infrastructure/sophos_utm/:
- maintainers: $team_e_spirit
- keywords: sophos utm
- $modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py:
- maintainers: $team_e_spirit stearz
- keywords: sophos utm
- $modules/web_infrastructure/sophos_utm/utm_proxy_exception.py:
- maintainers: $team_e_spirit RickS-C137
- keywords: sophos utm
- $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py:
- maintainers: stearz
- $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py:
- maintainers: stearz
- $modules/web_infrastructure/sophos_utm/utm_network_interface_address.py:
- maintainers: steamx
- $modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py:
- maintainers: steamx
- $modules/web_infrastructure/supervisorctl.py:
+ $modules/rundeck_project.py:
+ maintainers: nerzhul
+ $modules/runit.py:
+ maintainers: jsumners
+ $modules/say.py:
+ maintainers: $team_ansible_core
+ ignore: mpdehaan
+ $modules/scaleway_:
+ maintainers: $team_scaleway
+ $modules/scaleway_compute_private_network.py:
+ maintainers: pastral
+ $modules/scaleway_container.py:
+ maintainers: Lunik
+ $modules/scaleway_container_info.py:
+ maintainers: Lunik
+ $modules/scaleway_container_namespace.py:
+ maintainers: Lunik
+ $modules/scaleway_container_namespace_info.py:
+ maintainers: Lunik
+ $modules/scaleway_container_registry.py:
+ maintainers: Lunik
+ $modules/scaleway_container_registry_info.py:
+ maintainers: Lunik
+ $modules/scaleway_database_backup.py:
+ maintainers: guillaume_ro_fr
+ $modules/scaleway_function.py:
+ maintainers: Lunik
+ $modules/scaleway_function_info.py:
+ maintainers: Lunik
+ $modules/scaleway_function_namespace.py:
+ maintainers: Lunik
+ $modules/scaleway_function_namespace_info.py:
+ maintainers: Lunik
+ $modules/scaleway_image_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_ip_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_organization_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_private_network.py:
+ maintainers: pastral
+ $modules/scaleway_security_group.py:
+ maintainers: DenBeke
+ $modules/scaleway_security_group_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_security_group_rule.py:
+ maintainers: DenBeke
+ $modules/scaleway_server_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_snapshot_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_volume.py:
+ ignore: hekonsek
+ labels: scaleway_volume
+ $modules/scaleway_volume_info.py:
+ maintainers: Spredzy
+ $modules/sefcontext.py:
+ maintainers: dagwieers
+ $modules/selinux_permissive.py:
+ maintainers: mscherer
+ $modules/selogin.py:
+ maintainers: bachradsusi dankeder jamescassell
+ $modules/sendgrid.py:
+ maintainers: makaimc
+ $modules/sensu_:
+ maintainers: dmsimard
+ $modules/sensu_check.py:
+ maintainers: andsens
+ $modules/sensu_silence.py:
+ maintainers: smbambling
+ $modules/sensu_subscription.py:
+ maintainers: andsens
+ $modules/seport.py:
+ maintainers: dankeder
+ $modules/serverless.py:
+ ignore: ryansb
+ $modules/shutdown.py:
+ maintainers: nitzmahone samdoran aminvakil
+ $modules/simpleinit_msb.py:
+ maintainers: vaygr
+ $modules/sl_vm.py:
+ maintainers: mcltn
+ $modules/slack.py:
+ maintainers: ramondelafuente
+ $modules/slackpkg.py:
+ maintainers: KimNorgaard
+ $modules/smartos_image_info.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris
+ $modules/snap.py:
+ labels: snap
+ maintainers: angristan vcarceler russoz
+ $modules/snap_alias.py:
+ labels: snap
+ maintainers: russoz
+ $modules/snmp_facts.py:
+ maintainers: ogenstad ujwalkomarla
+ $modules/solaris_zone.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris pmarkham
+ $modules/sorcery.py:
+ maintainers: vaygr
+ $modules/spectrum_device.py:
+ maintainers: orgito
+ $modules/spectrum_model_attrs.py:
+ maintainers: tgates81
+ $modules/spotinst_aws_elastigroup.py:
+ maintainers: talzur
+ $modules/ss_3par_cpg.py:
+ maintainers: farhan7500 gautamphegde
+ $modules/ssh_config.py:
+ maintainers: gaqzi Akasurde
+ $modules/stacki_host.py:
+ labels: stacki_host
+ maintainers: bsanders bbyhuy
+ $modules/statsd.py:
+ maintainers: mamercad
+ $modules/statusio_maintenance.py:
+ maintainers: bhcopeland
+ $modules/sudoers.py:
+ maintainers: JonEllis
+ $modules/supervisorctl.py:
maintainers: inetfuture mattupstate
- $modules/web_infrastructure/taiga_issue.py:
+ $modules/svc.py:
+ maintainers: bcoca
+ $modules/svr4pkg.py:
+ labels: solaris svr4pkg
+ maintainers: $team_solaris brontitall
+ $modules/swdepot.py:
+ keywords: hp-ux
+ labels: hpux swdepot
+ maintainers: $team_hpux melodous
+ $modules/swupd.py:
+ labels: swupd
+ maintainers: hnanni albertomurillo
+ $modules/syslogger.py:
+ maintainers: garbled1
+ $modules/syspatch.py:
+ maintainers: precurse
+ $modules/sysrc.py:
+ maintainers: dlundgren
+ $modules/systemd_creds_decrypt.py:
+ maintainers: konstruktoid
+ $modules/systemd_creds_encrypt.py:
+ maintainers: konstruktoid
+ $modules/systemd_info.py:
+ maintainers: NomakCooper
+ $modules/sysupgrade.py:
+ maintainers: precurse
+ $modules/taiga_issue.py:
maintainers: lekum
+ $modules/telegram.py:
+ maintainers: tyouxa loms lomserman
+ $modules/terraform.py:
+ ignore: ryansb
+ maintainers: m-yosefpor rainerleber
+ $modules/timezone.py:
+ maintainers: indrajitr jasperla tmshn
+ $modules/twilio.py:
+ maintainers: makaimc
+ $modules/typetalk.py:
+ maintainers: tksmd
+ $modules/udm_:
+ maintainers: keachi
+ $modules/ufw.py:
+ labels: ufw
+ maintainers: ahtik ovcharenko pyykkis
+ notify: felixfontein
+ $modules/uptimerobot.py:
+ maintainers: nate-kingsley
+ $modules/urpmi.py:
+ maintainers: pmakowski
+ $modules/usb_facts.py:
+ maintainers: maxopoly
+ $modules/utm_:
+ keywords: sophos utm
+ maintainers: $team_e_spirit
+ $modules/utm_ca_host_key_cert.py:
+ ignore: stearz
+ maintainers: $team_e_spirit
+ $modules/utm_ca_host_key_cert_info.py:
+ ignore: stearz
+ maintainers: $team_e_spirit
+ $modules/utm_network_interface_address.py:
+ maintainers: steamx
+ $modules/utm_network_interface_address_info.py:
+ maintainers: steamx
+ $modules/utm_proxy_auth_profile.py:
+ keywords: sophos utm
+ ignore: stearz
+ maintainers: $team_e_spirit
+ $modules/utm_proxy_exception.py:
+ keywords: sophos utm
+ maintainers: $team_e_spirit RickS-C137
+ $modules/vdo.py:
+ maintainers: rhawalsh bgurney-rh
+ $modules/vertica_:
+ maintainers: dareko
+ $modules/vexata_:
+ maintainers: vexata
+ $modules/vmadm.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris
+ $modules/wakeonlan.py:
+ maintainers: dagwieers
+ $modules/wdc_:
+ ignore: jose-delarosa
+ maintainers: $team_redfish
+ $modules/wdc_redfish_command.py:
+ maintainers: $team_wdc
+ $modules/wdc_redfish_info.py:
+ maintainers: $team_wdc
+ $modules/xattr.py:
+ labels: xattr
+ maintainers: bcoca
+ $modules/xbps.py:
+ maintainers: dinoocch the-maldridge
+ $modules/xcc_:
+ maintainers: panyy3 renxulei
+ $modules/xdg_mime.py:
+ maintainers: mhalano
+ $modules/xenserver_:
+ maintainers: bvitnik
+ $modules/xenserver_facts.py:
+ ignore: andyhky ryansb
+ labels: xenserver_facts
+ maintainers: caphrim007 cheese
+ $modules/xfconf.py:
+ labels: xfconf
+ maintainers: russoz jbenden
+ $modules/xfconf_info.py:
+ labels: xfconf
+ maintainers: russoz
+ $modules/xfs_quota.py:
+ maintainers: bushvin
+ $modules/xml.py:
+ ignore: magnus919
+ labels: m:xml xml
+ maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0
+ $modules/yarn.py:
+ ignore: chrishoffman verkaufer
+ $modules/yum_versionlock.py:
+ maintainers: gyptazy aminvakil
+ $modules/zfs:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris
+ $modules/zfs.py:
+ maintainers: johanwiren
+ $modules/zfs_delegate_admin.py:
+ maintainers: natefoo
+ $modules/znode.py:
+ maintainers: treyperry
+ $modules/zpool.py:
+ maintainers: tomhesse
+ $modules/zpool_facts:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris
+ $modules/zypper.py:
+ ignore: dirtyharrycallahan robinro
+ labels: zypper
+ maintainers: $team_suse
+ $modules/zypper_repository.py:
+ ignore: matze
+ labels: zypper
+ maintainers: $team_suse
+ $plugin_utils/ansible_type.py:
+ maintainers: vbotka
+ $modules/zypper_repository_info.py:
+ labels: zypper
+ maintainers: $team_suse TobiasZeuch181
+ $plugin_utils/keys_filter.py:
+ maintainers: vbotka
+ $plugin_utils/unsafe.py:
+ maintainers: felixfontein
$tests/a_module.py:
maintainers: felixfontein
+ $tests/ansible_type.py:
+ maintainers: vbotka
+ $tests/fqdn_valid.py:
+ maintainers: vbotka
+#########################
+ docs/docsite/rst/filter_guide.rst: {}
+ docs/docsite/rst/filter_guide_abstract_informations.rst: {}
+ docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst:
+ maintainers: keilr
+ docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst:
+ maintainers: felixfontein giner
+ docs/docsite/rst/filter_guide_abstract_informations_grouping.rst:
+ maintainers: felixfontein
+ docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst:
+ maintainers: cfiehe
+ docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst:
+ maintainers: vbotka
+ docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst:
+ maintainers: vbotka
+ docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst:
+ maintainers: vbotka
+ docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst:
+ maintainers: vbotka
+ docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst:
+ maintainers: vbotka
+ docs/docsite/rst/filter_guide_conversions.rst:
+ maintainers: Ajpantuso kellyjonbrazil
+ docs/docsite/rst/filter_guide_creating_identifiers.rst:
+ maintainers: Ajpantuso
+ docs/docsite/rst/filter_guide_paths.rst: {}
+ docs/docsite/rst/filter_guide_selecting_json_data.rst: {}
+ docs/docsite/rst/filter_guide_working_with_times.rst:
+ maintainers: resmo
+ docs/docsite/rst/filter_guide_working_with_unicode.rst:
+ maintainers: Ajpantuso
+ docs/docsite/rst/filter_guide_working_with_versions.rst:
+ maintainers: ericzolf
+ docs/docsite/rst/guide_alicloud.rst:
+ maintainers: xiaozhu36
+ docs/docsite/rst/guide_cmdrunner.rst:
+ maintainers: russoz
+ docs/docsite/rst/guide_deps.rst:
+ maintainers: russoz
+ docs/docsite/rst/guide_iocage.rst:
+ maintainers: russoz felixfontein
+ docs/docsite/rst/guide_iocage_inventory.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_aliases.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_basics.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_dhcp.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_hooks.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_properties.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_tags.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_modulehelper.rst:
+ maintainers: russoz
+ docs/docsite/rst/guide_online.rst:
+ maintainers: remyleone
+ docs/docsite/rst/guide_packet.rst:
+ maintainers: baldwinSPC nurfet-becirevic t0mk teebes
+ docs/docsite/rst/guide_scaleway.rst:
+ maintainers: $team_scaleway
+ docs/docsite/rst/guide_uthelper.rst:
+ maintainers: russoz
+ docs/docsite/rst/guide_vardict.rst:
+ maintainers: russoz
+ docs/docsite/rst/test_guide.rst:
+ maintainers: felixfontein
#########################
tests/:
labels: tests
- tests/unit/:
- labels: unit
- support: community
tests/integration:
labels: integration
support: community
- tests/utils/:
- maintainers: gundalow
+ tests/unit/:
labels: unit
+ support: community
+ tests/utils/:
+ labels: unit
+ maintainers: gundalow
macros:
actions: plugins/action
becomes: plugins/become
caches: plugins/cache
callbacks: plugins/callback
- cliconfs: plugins/cliconf
connections: plugins/connection
doc_fragments: plugins/doc_fragments
filters: plugins/filter
@@ -1289,32 +1580,31 @@ macros:
lookups: plugins/lookup
module_utils: plugins/module_utils
modules: plugins/modules
- terminals: plugins/terminal
+ plugin_utils: plugins/plugin_utils
tests: plugins/test
team_ansible_core:
team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
- team_consul: sgargan
+ team_consul: sgargan apollo13 Ilgmi
team_cyberark_conjur: jvanderhoof ryanprior
team_e_spirit: MatrixCrawler getjack
team_flatpak: JayKayy oolongbrothers
- team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit
+ team_gitlab: Lunik Shaps marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit
team_hpux: bcoca davx8342
team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
- team_ipa: Akasurde Nosmoht fxfitz justchris1
+ team_ipa: Akasurde Nosmoht justchris1
team_jboss: Wolfant jairojunior wbrefvem
- team_keycloak: eikef ndclt
+ team_keycloak: eikef ndclt mattock thomasbach-dev
team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
team_oracle: manojmeda mross22 nalsaber
- team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
- team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06
- team_rhn: FlossWare alikins barnabycourt vritant
+ team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt
+ team_rhsm: cnsnyder ptoscano
team_scaleway: remyleone abarbare
team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
- team_suse: commel evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
- team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
+ team_suse: commel evrardjp lrupp AnderEnder alxgu andytom sealor
+ team_virt: joshainglis karmab Thulium-Drake Ajpantuso
team_wdc: mikemoerk
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index bd5030f2c2..4b1c1bfb95 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -7,147 +7,147 @@ name: Bug report
description: Create a report to help us improve
body:
-- type: markdown
- attributes:
- value: |
- ⚠
- Verify first that your issue is not [already reported on GitHub][issue search].
- Also test if the latest release and devel branch are affected too.
- *Complete **all** sections as described, this form is processed automatically.*
+ - type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test if the latest release and devel branch are affected too.
+ *Complete **all** sections as described, this form is processed automatically.*
- [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
-- type: textarea
- attributes:
- label: Summary
- description: Explain the problem briefly below.
- placeholder: >-
- When I try to do X with the collection from the main branch on GitHub, Y
- breaks in a way Z under the env E. Here are all the details I know
- about this problem...
- validations:
- required: true
-
-- type: dropdown
- attributes:
- label: Issue Type
- # FIXME: Once GitHub allows defining the default choice, update this
- options:
- - Bug Report
- validations:
- required: true
-
-- type: textarea
- attributes:
- # For smaller collections we could use a multi-select and hardcode the list
- # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins
- # Select from list, filter as you type (`mysql` would only show the 3 mysql components)
- # OR freeform - doesn't seem to be supported in adaptivecards
- label: Component Name
- description: >-
- Write the short name of the module, plugin, task or feature below,
- *use your best guess if unsure*.
- placeholder: dnf, apt, yum, pip, user etc.
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Ansible Version
- description: >-
- Paste verbatim output from `ansible --version` between
- tripple backticks.
- value: |
- ```console (paste below)
- $ ansible --version
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Community.general Version
- description: >-
- Paste verbatim output from "ansible-galaxy collection list community.general"
- between tripple backticks.
- value: |
- ```console (paste below)
- $ ansible-galaxy collection list community.general
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Configuration
- description: >-
- If this issue has an example piece of YAML that can help to reproduce this problem, please provide it.
- This can be a piece of YAML from, e.g., an automation, script, scene or configuration.
- Paste verbatim output from `ansible-config dump --only-changed` between quotes
- value: |
- ```console (paste below)
- $ ansible-config dump --only-changed
-
- ```
-
-
-- type: textarea
- attributes:
- label: OS / Environment
- description: >-
- Provide all relevant information below, e.g. target OS versions,
- network device firmware, etc.
- placeholder: RHEL 8, CentOS Stream etc.
- validations:
- required: false
-
-
-- type: textarea
- attributes:
- label: Steps to Reproduce
- description: |
- Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also passed any playbooks, configs and commands you used.
-
- **HINT:** You can paste https://gist.github.com links for larger files.
- value: |
-
- ```yaml (paste below)
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Expected Results
- description: >-
- Describe what you expected to happen when running the steps above.
- placeholder: >-
- I expected X to happen because I assumed Y.
- that it did not.
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Actual Results
- description: |
- Describe what actually happened. If possible run with extra verbosity (`-vvvv`).
-
- Paste verbatim command output between quotes.
- value: |
- ```console (paste below)
-
- ```
-- type: checkboxes
- attributes:
- label: Code of Conduct
- description: |
- Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
- options:
- - label: I agree to follow the Ansible Code of Conduct
+ - type: textarea
+ attributes:
+ label: Summary
+ description: Explain the problem briefly below.
+ placeholder: >-
+ When I try to do X with the collection from the main branch on GitHub, Y
+ breaks in a way Z under the env E. Here are all the details I know
+ about this problem...
+ validations:
required: true
+
+ - type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Bug Report
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ # For smaller collections we could use a multi-select and hardcode the list
+ # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins
+ # Select from list, filter as you type (`mysql` would only show the 3 mysql components)
+ # OR freeform - doesn't seem to be supported in adaptivecards
+ label: Component Name
+ description: >-
+ Write the short name of the module, plugin, task or feature below,
+ *use your best guess if unsure*. Do not include `community.general.`!
+ placeholder: dnf, apt, yum, pip, user etc.
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Ansible Version
+ description: >-
+ Paste verbatim output from `ansible --version` between
+ triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible --version
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Community.general Version
+ description: >-
+ Paste verbatim output from "ansible-galaxy collection list community.general"
+ between triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible-galaxy collection list community.general
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Configuration
+ description: >-
+ If this issue has an example piece of YAML that can help to reproduce this problem, please provide it.
+ This can be a piece of YAML from, e.g., an automation, script, scene or configuration.
+ Paste verbatim output from `ansible-config dump --only-changed` between quotes
+ value: |
+ ```console (paste below)
+ $ ansible-config dump --only-changed
+
+ ```
+
+
+ - type: textarea
+ attributes:
+ label: OS / Environment
+ description: >-
+ Provide all relevant information below, e.g. target OS versions,
+ network device firmware, etc.
+ placeholder: RHEL 8, CentOS Stream etc.
+ validations:
+ required: false
+
+
+ - type: textarea
+ attributes:
+ label: Steps to Reproduce
+ description: |
+ Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+
+ ```yaml (paste below)
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Expected Results
+ description: >-
+ Describe what you expected to happen when running the steps above.
+ placeholder: >-
+ I expected X to happen because I assumed Y and was shocked
+ that it did not.
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Actual Results
+ description: |
+ Describe what actually happened. If possible run with extra verbosity (`-vvvv`).
+
+ Paste verbatim command output between quotes.
+ value: |
+ ```console (paste below)
+
+ ```
+ - type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
...
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 0cc2db058c..476eed516e 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -6,26 +6,26 @@
# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
blank_issues_enabled: false # default: true
contact_links:
-- name: Security bug report
- url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: |
- Please learn how to report security vulnerabilities here.
+ - name: Security bug report
+ url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: |
+ Please learn how to report security vulnerabilities here.
- For all security related bugs, email security@ansible.com
- instead of using this issue tracker and you will receive
- a prompt response.
+ For all security related bugs, email security@ansible.com
+ instead of using this issue tracker and you will receive
+ a prompt response.
- For more information, see
- https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
-- name: Ansible Code of Conduct
- url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: Be nice to other members of the community.
-- name: Talks to the community
- url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
- about: Please ask and answer usage questions here
-- name: Working groups
- url: https://github.com/ansible/community/wiki
- about: Interested in improving a specific area? Become a part of a working group!
-- name: For Enterprise
- url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: Red Hat offers support for the Ansible Automation Platform
+ For more information, see
+ https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
+ - name: Ansible Code of Conduct
+ url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: Be nice to other members of the community.
+ - name: Talks to the community
+ url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
+ about: Please ask and answer usage questions here
+ - name: Working groups
+ url: https://github.com/ansible/community/wiki
+ about: Interested in improving a specific area? Become a part of a working group!
+ - name: For Enterprise
+ url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: Red Hat offers support for the Ansible Automation Platform
diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml
index 3a2777f207..2ad4bce44a 100644
--- a/.github/ISSUE_TEMPLATE/documentation_report.yml
+++ b/.github/ISSUE_TEMPLATE/documentation_report.yml
@@ -8,122 +8,122 @@ description: Ask us about docs
# NOTE: issue body is enabled to allow screenshots
body:
-- type: markdown
- attributes:
- value: |
- ⚠
- Verify first that your issue is not [already reported on GitHub][issue search].
- Also test if the latest release and devel branch are affected too.
- *Complete **all** sections as described, this form is processed automatically.*
+ - type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test if the latest release and devel branch are affected too.
+ *Complete **all** sections as described, this form is processed automatically.*
- [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
-- type: textarea
- attributes:
- label: Summary
- description: |
- Explain the problem briefly below, add suggestions to wording or structure.
+ - type: textarea
+ attributes:
+ label: Summary
+ description: |
+ Explain the problem briefly below, add suggestions to wording or structure.
- **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
- placeholder: >-
- I was reading the Collection documentation of version X and I'm having
- problems understanding Y. It would be very helpful if that got
- rephrased as Z.
- validations:
- required: true
-
-- type: dropdown
- attributes:
- label: Issue Type
- # FIXME: Once GitHub allows defining the default choice, update this
- options:
- - Documentation Report
- validations:
- required: true
-
-- type: input
- attributes:
- label: Component Name
- description: >-
- Write the short name of the rst file, module, plugin, task or
- feature below, *use your best guess if unsure*.
- placeholder: mysql_user
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Ansible Version
- description: >-
- Paste verbatim output from `ansible --version` between
- tripple backticks.
- value: |
- ```console (paste below)
- $ ansible --version
-
- ```
- validations:
- required: false
-
-- type: textarea
- attributes:
- label: Community.general Version
- description: >-
- Paste verbatim output from "ansible-galaxy collection list community.general"
- between tripple backticks.
- value: |
- ```console (paste below)
- $ ansible-galaxy collection list community.general
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Configuration
- description: >-
- Paste verbatim output from `ansible-config dump --only-changed` between quotes.
- value: |
- ```console (paste below)
- $ ansible-config dump --only-changed
-
- ```
- validations:
- required: false
-
-- type: textarea
- attributes:
- label: OS / Environment
- description: >-
- Provide all relevant information below, e.g. OS version,
- browser, etc.
- placeholder: Fedora 33, Firefox etc.
- validations:
- required: false
-
-- type: textarea
- attributes:
- label: Additional Information
- description: |
- Describe how this improves the documentation, e.g. before/after situation or screenshots.
-
- **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.
-
- **HINT:** You can paste https://gist.github.com links for larger files.
- placeholder: >-
- When the improvement is applied, it makes it more straightforward
- to understand X.
- validations:
- required: false
-
-- type: checkboxes
- attributes:
- label: Code of Conduct
- description: |
- Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
- options:
- - label: I agree to follow the Ansible Code of Conduct
+ **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
+ placeholder: >-
+ I was reading the Collection documentation of version X and I'm having
+ problems understanding Y. It would be very helpful if that got
+ rephrased as Z.
+ validations:
required: true
+
+ - type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Documentation Report
+ validations:
+ required: true
+
+ - type: input
+ attributes:
+ label: Component Name
+ description: >-
+ Write the short name of the file, module, plugin, task or feature below,
+ *use your best guess if unsure*. Do not include `community.general.`!
+ placeholder: mysql_user
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Ansible Version
+ description: >-
+ Paste verbatim output from `ansible --version` between
+ triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible --version
+
+ ```
+ validations:
+ required: false
+
+ - type: textarea
+ attributes:
+ label: Community.general Version
+ description: >-
+ Paste verbatim output from "ansible-galaxy collection list community.general"
+ between triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible-galaxy collection list community.general
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Configuration
+ description: >-
+ Paste verbatim output from `ansible-config dump --only-changed` between quotes.
+ value: |
+ ```console (paste below)
+ $ ansible-config dump --only-changed
+
+ ```
+ validations:
+ required: false
+
+ - type: textarea
+ attributes:
+ label: OS / Environment
+ description: >-
+ Provide all relevant information below, e.g. OS version,
+ browser, etc.
+ placeholder: Fedora 33, Firefox etc.
+ validations:
+ required: false
+
+ - type: textarea
+ attributes:
+ label: Additional Information
+ description: |
+ Describe how this improves the documentation, e.g. before/after situation or screenshots.
+
+ **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ placeholder: >-
+ When the improvement is applied, it makes it more straightforward
+ to understand X.
+ validations:
+ required: false
+
+ - type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
...
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
index 9630b67e12..dc62f94c5c 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -7,67 +7,67 @@ name: Feature request
description: Suggest an idea for this project
body:
-- type: markdown
- attributes:
- value: |
- ⚠
- Verify first that your issue is not [already reported on GitHub][issue search].
- Also test if the latest release and devel branch are affected too.
- *Complete **all** sections as described, this form is processed automatically.*
+ - type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test if the latest release and devel branch are affected too.
+ *Complete **all** sections as described, this form is processed automatically.*
- [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
-- type: textarea
- attributes:
- label: Summary
- description: Describe the new feature/improvement briefly below.
- placeholder: >-
- I am trying to do X with the collection from the main branch on GitHub and
- I think that implementing a feature Y would be very helpful for me and
- every other user of community.general because of Z.
- validations:
- required: true
-
-- type: dropdown
- attributes:
- label: Issue Type
- # FIXME: Once GitHub allows defining the default choice, update this
- options:
- - Feature Idea
- validations:
- required: true
-
-- type: input
- attributes:
- label: Component Name
- description: >-
- Write the short name of the module, plugin, task or feature below,
- *use your best guess if unsure*.
- placeholder: dnf, apt, yum, pip, user etc.
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Additional Information
- description: |
- Describe how the feature would be used, why it is needed and what it would solve.
-
- **HINT:** You can paste https://gist.github.com links for larger files.
- value: |
-
- ```yaml (paste below)
-
- ```
- validations:
- required: false
-- type: checkboxes
- attributes:
- label: Code of Conduct
- description: |
- Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
- options:
- - label: I agree to follow the Ansible Code of Conduct
+ - type: textarea
+ attributes:
+ label: Summary
+ description: Describe the new feature/improvement briefly below.
+ placeholder: >-
+ I am trying to do X with the collection from the main branch on GitHub and
+ I think that implementing a feature Y would be very helpful for me and
+ every other user of community.general because of Z.
+ validations:
required: true
+
+ - type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Feature Idea
+ validations:
+ required: true
+
+ - type: input
+ attributes:
+ label: Component Name
+ description: >-
+ Write the short name of the module or plugin, or which other part(s) of the collection this feature affects.
+ *use your best guess if unsure*. Do not include `community.general.`!
+ placeholder: dnf, apt, yum, pip, user etc.
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Additional Information
+ description: |
+ Describe how the feature would be used, why it is needed and what it would solve.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+
+ ```yaml (paste below)
+
+ ```
+ validations:
+ required: false
+ - type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
...
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 2f4ff900d8..f71b322d2a 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -9,3 +9,7 @@ updates:
directory: "/"
schedule:
interval: "weekly"
+ groups:
+ ci:
+ patterns:
+ - "*"
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000000..29a2d2e36a
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,32 @@
+##### SUMMARY
+
+
+
+
+
+
+##### ISSUE TYPE
+
+- Bugfix Pull Request
+- Docs Pull Request
+- Feature Pull Request
+- New Module/Plugin Pull Request
+- Refactoring Pull Request
+- Test Pull Request
+
+##### COMPONENT NAME
+
+
+##### ADDITIONAL INFORMATION
+
+
+
+
+```paste below
+
+```
diff --git a/docs/docsite/helper/lists_mergeby/list3.out.j2 b/.github/pull_request_template.md.license
similarity index 79%
rename from docs/docsite/helper/lists_mergeby/list3.out.j2
rename to .github/pull_request_template.md.license
index b51f6b8681..a1390a69ed 100644
--- a/docs/docsite/helper/lists_mergeby/list3.out.j2
+++ b/.github/pull_request_template.md.license
@@ -1,7 +1,3 @@
-{#
Copyright (c) Ansible Project
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
-#}
-list3:
-{{ list3|to_nice_yaml(indent=0) }}
diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml
new file mode 100644
index 0000000000..616c7a843c
--- /dev/null
+++ b/.github/workflows/ansible-test.yml
@@ -0,0 +1,176 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# For the comprehensive list of the inputs supported by the ansible-community/ansible-test-gh-action GitHub Action, see
+# https://github.com/marketplace/actions/ansible-test
+
+name: EOL CI
+"on":
+ # Run EOL CI against all pushes (direct commits, also merged PRs), Pull Requests
+ push:
+ branches:
+ - main
+ - stable-*
+ pull_request:
+ # Run EOL CI once per day (at 08:00 UTC)
+ schedule:
+ - cron: '0 8 * * *'
+
+concurrency:
+ # Make sure there is at most one active run per PR, but do not cancel any non-PR runs
+ group: ${{ github.workflow }}-${{ (github.head_ref && github.event.number) || github.run_id }}
+ cancel-in-progress: true
+
+jobs:
+ sanity:
+ name: EOL Sanity (Ⓐ${{ matrix.ansible }})
+ strategy:
+ matrix:
+ ansible:
+ - '2.17'
+ runs-on: ubuntu-latest
+ steps:
+ - name: Perform sanity testing
+ uses: felixfontein/ansible-test-gh-action@main
+ with:
+ ansible-core-version: stable-${{ matrix.ansible }}
+ codecov-token: ${{ secrets.CODECOV_TOKEN }}
+ coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
+ pull-request-change-detection: 'true'
+ testing-type: sanity
+ pre-test-cmd: >-
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools
+
+ units:
+ runs-on: ubuntu-latest
+ name: EOL Units (Ⓐ${{ matrix.ansible }}+py${{ matrix.python }})
+ strategy:
+ # As soon as the first unit test fails, cancel the others to free up the CI queue
+ fail-fast: true
+ matrix:
+ ansible:
+ - ''
+ python:
+ - ''
+ exclude:
+ - ansible: ''
+ include:
+ - ansible: '2.17'
+ python: '3.7'
+ - ansible: '2.17'
+ python: '3.10'
+ - ansible: '2.17'
+ python: '3.12'
+
+ steps:
+ - name: >-
+ Perform unit testing against
+ Ansible version ${{ matrix.ansible }}
+ uses: felixfontein/ansible-test-gh-action@main
+ with:
+ ansible-core-version: stable-${{ matrix.ansible }}
+ codecov-token: ${{ secrets.CODECOV_TOKEN }}
+ coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
+ pre-test-cmd: >-
+ mkdir -p ../../ansible
+ ;
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools
+ pull-request-change-detection: 'true'
+ target-python-version: ${{ matrix.python }}
+ testing-type: units
+
+ integration:
+ runs-on: ubuntu-latest
+ name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }})
+ strategy:
+ fail-fast: false
+ matrix:
+ ansible:
+ - ''
+ docker:
+ - ''
+ python:
+ - ''
+ target:
+ - ''
+ exclude:
+ - ansible: ''
+ include:
+ # 2.17
+ - ansible: '2.17'
+ docker: fedora39
+ python: ''
+ target: azp/posix/1/
+ - ansible: '2.17'
+ docker: fedora39
+ python: ''
+ target: azp/posix/2/
+ - ansible: '2.17'
+ docker: fedora39
+ python: ''
+ target: azp/posix/3/
+ - ansible: '2.17'
+ docker: ubuntu2004
+ python: ''
+ target: azp/posix/1/
+ - ansible: '2.17'
+ docker: ubuntu2004
+ python: ''
+ target: azp/posix/2/
+ - ansible: '2.17'
+ docker: ubuntu2004
+ python: ''
+ target: azp/posix/3/
+ - ansible: '2.17'
+ docker: alpine319
+ python: ''
+ target: azp/posix/1/
+ - ansible: '2.17'
+ docker: alpine319
+ python: ''
+ target: azp/posix/2/
+ - ansible: '2.17'
+ docker: alpine319
+ python: ''
+ target: azp/posix/3/
+ # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+ # - ansible: '2.17'
+ # docker: default
+ # python: '3.7'
+ # target: azp/generic/1/
+ # - ansible: '2.17'
+ # docker: default
+ # python: '3.12'
+ # target: azp/generic/1/
+
+ steps:
+ - name: >-
+ Perform integration testing against
+ Ansible version ${{ matrix.ansible }}
+ under Python ${{ matrix.python }}
+ uses: felixfontein/ansible-test-gh-action@main
+ with:
+ ansible-core-version: stable-${{ matrix.ansible }}
+ codecov-token: ${{ secrets.CODECOV_TOKEN }}
+ coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
+ docker-image: ${{ matrix.docker }}
+ integration-continue-on-error: 'false'
+ integration-diff: 'false'
+ integration-retry-on-error: 'true'
+ # TODO: remove "--branch stable-2" from community.crypto install once we're only using ansible-core 2.17 or newer!
+ pre-test-cmd: >-
+ mkdir -p ../../ansible
+ ;
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git ../../ansible/posix
+ ;
+ git clone --depth=1 --single-branch --branch stable-2 https://github.com/ansible-collections/community.crypto.git ../../community/crypto
+ ;
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/community.docker.git ../../community/docker
+ ;
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools
+ pull-request-change-detection: 'true'
+ target: ${{ matrix.target }}
+ target-python-version: ${{ matrix.python }}
+ testing-type: integration
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index f7ab9450cc..3c6776929d 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -5,9 +5,10 @@
name: "Code scanning - action"
-on:
+"on":
schedule:
- cron: '26 19 * * 1'
+ workflow_dispatch:
permissions:
contents: read
@@ -22,40 +23,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- - name: Checkout repository
- uses: actions/checkout@v3
- with:
- # We must fetch at least the immediate parents so that if this is
- # a pull request then we can checkout the head.
- fetch-depth: 2
+ - name: Checkout repository
+ uses: actions/checkout@v5
+ with:
+ persist-credentials: false
- # If this run was triggered by a pull request event, then checkout
- # the head of the pull request instead of the merge commit.
- - run: git checkout HEAD^2
- if: ${{ github.event_name == 'pull_request' }}
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v4
+ with:
+ languages: python
- # Initializes the CodeQL tools for scanning.
- - name: Initialize CodeQL
- uses: github/codeql-action/init@v2
- # Override language selection by uncommenting this and choosing your languages
- # with:
- # languages: go, javascript, csharp, python, cpp, java
-
- # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
- # If this step fails, then you should remove it and run the build manually (see below)
- - name: Autobuild
- uses: github/codeql-action/autobuild@v2
-
- # ℹ️ Command-line programs to run using the OS shell.
- # 📚 https://git.io/JvXDl
-
- # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
- # and modify them (or add more) to build your code if your project
- # uses a compiled language
-
- #- run: |
- # make bootstrap
- # make release
-
- - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v2
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v4
diff --git a/.github/workflows/nox.yml b/.github/workflows/nox.yml
new file mode 100644
index 0000000000..81c6563811
--- /dev/null
+++ b/.github/workflows/nox.yml
@@ -0,0 +1,28 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+name: nox
+'on':
+ push:
+ branches:
+ - main
+ - stable-*
+ pull_request:
+ # Run CI once per day (at 08:00 UTC)
+ schedule:
+ - cron: '0 8 * * *'
+ workflow_dispatch:
+
+jobs:
+ nox:
+ runs-on: ubuntu-latest
+ name: "Run extra sanity tests"
+ steps:
+ - name: Check out collection
+ uses: actions/checkout@v5
+ with:
+ persist-credentials: false
+ - name: Run nox
+ uses: ansible-community/antsibull-nox@main
diff --git a/.github/workflows/reuse.yml b/.github/workflows/reuse.yml
deleted file mode 100644
index a63b325f6c..0000000000
--- a/.github/workflows/reuse.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-name: Verify REUSE
-
-on:
- push:
- branches: [main]
- pull_request:
- branches: [main]
- # Run CI once per day (at 07:30 UTC)
- schedule:
- - cron: '30 7 * * *'
-
-jobs:
- check:
- permissions:
- contents: read
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v2
-
- - name: Install dependencies
- run: |
- pip install reuse
-
- - name: Check REUSE compliance
- run: |
- reuse lint
diff --git a/.gitignore b/.gitignore
index c39969326d..e427699798 100644
--- a/.gitignore
+++ b/.gitignore
@@ -383,6 +383,16 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+# ruff
+.ruff_cache/
+
+# LSP config files
+pyrightconfig.json
+
### Vim ###
# Swap
[._]*.s[a-v][a-z]
@@ -482,6 +492,10 @@ tags
# https://plugins.jetbrains.com/plugin/12206-codestream
.idea/codestream.xml
+# Azure Toolkit for IntelliJ plugin
+# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij
+.idea/**/azureSettings.xml
+
### Windows ###
# Windows thumbnail cache files
Thumbs.db
@@ -509,3 +523,11 @@ $RECYCLE.BIN/
*.lnk
# End of https://www.toptal.com/developers/gitignore/api/vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks
+
+# Integration tests cloud configs
+tests/integration/cloud-config-*.ini
+
+
+# VSCode specific extensions
+.vscode/settings.json
+.ansible
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
deleted file mode 100644
index 7e3d19094b..0000000000
--- a/.pre-commit-config.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-repos:
- - repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.0.1
- hooks:
- - id: trailing-whitespace
- - id: end-of-file-fixer
- - id: mixed-line-ending
- args: [--fix=lf]
- - id: fix-encoding-pragma
- - id: check-ast
- - id: check-merge-conflict
- - id: check-symlinks
- - repo: https://github.com/pre-commit/pygrep-hooks
- rev: v1.9.0
- hooks:
- - id: rst-backticks
- types: [file]
- files: changelogs/fragments/.*\.(yml|yaml)$
diff --git a/.reuse/dep5 b/.reuse/dep5
deleted file mode 100644
index 0c3745ebf8..0000000000
--- a/.reuse/dep5
+++ /dev/null
@@ -1,5 +0,0 @@
-Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-
-Files: changelogs/fragments/*
-Copyright: Ansible Project
-License: GPL-3.0-or-later
diff --git a/.yamllint b/.yamllint
new file mode 100644
index 0000000000..c10d86ab19
--- /dev/null
+++ b/.yamllint
@@ -0,0 +1,52 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+extends: default
+
+ignore: |
+ /changelogs/
+
+rules:
+ line-length:
+ max: 1000
+ level: error
+ document-start: disable
+ document-end: disable
+ truthy:
+ level: error
+ allowed-values:
+ - 'true'
+ - 'false'
+ indentation:
+ spaces: 2
+ indent-sequences: true
+ key-duplicates: enable
+ trailing-spaces: enable
+ new-line-at-end-of-file: disable
+ hyphens:
+ max-spaces-after: 1
+ empty-lines:
+ max: 2
+ max-start: 0
+ max-end: 0
+ commas:
+ max-spaces-before: 0
+ min-spaces-after: 1
+ max-spaces-after: 1
+ colons:
+ max-spaces-before: 0
+ max-spaces-after: 1
+ brackets:
+ min-spaces-inside: 0
+ max-spaces-inside: 0
+ braces:
+ min-spaces-inside: 0
+ max-spaces-inside: 1
+ octal-values:
+ forbid-implicit-octal: true
+ forbid-explicit-octal: true
+ comments:
+ min-spaces-from-content: 1
+ comments-indentation: false
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000..b35c52441b
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,5 @@
+# Placeholder changelog
+
+This file is a placeholder; a version-specific `CHANGELOG-vX.md` will be generated during releases from fragments
+under `changelogs/fragments`. On release branches once a release has been created, consult the branch's version-specific
+file for changes that have occurred in that branch.
diff --git a/tests/sanity/extra/aliases.json.license b/CHANGELOG.md.license
similarity index 100%
rename from tests/sanity/extra/aliases.json.license
rename to CHANGELOG.md.license
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 05af1bc345..119e04e170 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,575 +1,6 @@
-===============================
-Community General Release Notes
-===============================
+Placeholder changelog
+=====================
-.. contents:: Topics
-
-This changelog describes changes after version 4.0.0.
-
-v5.5.0
-======
-
-Release Summary
----------------
-
-Feature and bugfix release.
-
-Minor Changes
--------------
-
-- Added MIT license as ``LICENSES/MIT.txt`` for tests/unit/plugins/modules/packaging/language/test_gem.py (https://github.com/ansible-collections/community.general/pull/5065).
-- All software licenses are now in the ``LICENSES/`` directory of the collection root (https://github.com/ansible-collections/community.general/pull/5065, https://github.com/ansible-collections/community.general/pull/5079, https://github.com/ansible-collections/community.general/pull/5080, https://github.com/ansible-collections/community.general/pull/5083, https://github.com/ansible-collections/community.general/pull/5087, https://github.com/ansible-collections/community.general/pull/5095, https://github.com/ansible-collections/community.general/pull/5098, https://github.com/ansible-collections/community.general/pull/5106).
-- The collection repository conforms to the `REUSE specification `__ except for the changelog fragments (https://github.com/ansible-collections/community.general/pull/5138).
-- pipx - added state ``latest`` to the module (https://github.com/ansible-collections/community.general/pull/5105).
-- pipx - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/5085).
-- pipx - module fails faster when ``name`` is missing for states ``upgrade`` and ``reinstall`` (https://github.com/ansible-collections/community.general/pull/5100).
-- pipx module utils - created new module util ``pipx`` providing a ``cmd_runner`` specific for the ``pipx`` module (https://github.com/ansible-collections/community.general/pull/5085).
-- proxmox_kvm - allow ``agent`` argument to be a string (https://github.com/ansible-collections/community.general/pull/5107).
-- wdc_redfish_command - add ``IndicatorLedOn`` and ``IndicatorLedOff`` commands for ``Chassis`` category (https://github.com/ansible-collections/community.general/pull/5059).
-
-Bugfixes
---------
-
-- apache2_mod_proxy - avoid crash when reporting inability to parse balancer_member_page HTML caused by using an undefined variable in the error message (https://github.com/ansible-collections/community.general/pull/5111).
-- dig lookup plugin - fix evaluation of falsy values for boolean parameters ``fail_on_error`` and ``retry_servfail`` (https://github.com/ansible-collections/community.general/pull/5129).
-- dnsimple_info - correctly report missing library as ``requests`` and not ``another_library`` (https://github.com/ansible-collections/community.general/pull/5111).
-- funcd connection plugin - fix signature of ``exec_command`` (https://github.com/ansible-collections/community.general/pull/5111).
-- manageiq_alert_profiles - avoid crash when reporting unknown profile caused by trying to return an undefined variable (https://github.com/ansible-collections/community.general/pull/5111).
-- nsupdate - compatibility with NS records (https://github.com/ansible-collections/community.general/pull/5112).
-- packet_ip_subnet - fix error reporting in case of invalid CIDR prefix lengths (https://github.com/ansible-collections/community.general/pull/5111).
-- pip_package_info - remove usage of global variable (https://github.com/ansible-collections/community.general/pull/5111).
-- proxmox_kvm - fix wrong condition (https://github.com/ansible-collections/community.general/pull/5108).
-
-v5.4.0
-======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- ModuleHelper module utils - added property ``verbosity`` to base class (https://github.com/ansible-collections/community.general/pull/5035).
-- apk - add ``world`` parameter for supporting a custom world file (https://github.com/ansible-collections/community.general/pull/4976).
-- consul - adds ``ttl`` parameter for session (https://github.com/ansible-collections/community.general/pull/4996).
-- dig lookup plugin - add option ``fail_on_error`` to allow stopping execution on lookup failures (https://github.com/ansible-collections/community.general/pull/4973).
-- keycloak_* modules - add ``http_agent`` parameter with default value ``Ansible`` (https://github.com/ansible-collections/community.general/issues/5023).
-- lastpass - use config manager for handling plugin options (https://github.com/ansible-collections/community.general/pull/5022).
-- listen_ports_facts - add new ``include_non_listening`` option which adds ``-a`` option to ``netstat`` and ``ss``. This shows both listening and non-listening (for TCP this means established connections) sockets, and returns ``state`` and ``foreign_address`` (https://github.com/ansible-collections/community.general/issues/4762, https://github.com/ansible-collections/community.general/pull/4953).
-- maven_artifact - add a new ``unredirected_headers`` option that can be used with ansible-core 2.12 and above. The default value is to not use ``Authorization`` and ``Cookie`` headers on redirects for security reasons. With ansible-core 2.11, all headers are still passed on for redirects (https://github.com/ansible-collections/community.general/pull/4812).
-- pacman - added parameters ``reason`` and ``reason_for`` to set/change the install reason of packages (https://github.com/ansible-collections/community.general/pull/4956).
-- xfconf - add ``stdout``, ``stderr`` and ``cmd`` to the module results (https://github.com/ansible-collections/community.general/pull/5037).
-- xfconf - use ``do_raise()`` instead of defining custom exception class (https://github.com/ansible-collections/community.general/pull/4975).
-- xfconf_info - use ``do_raise()`` instead of defining custom exception class (https://github.com/ansible-collections/community.general/pull/4975).
-
-Bugfixes
---------
-
-- keyring_info - fix the result from the keyring library never getting returned (https://github.com/ansible-collections/community.general/pull/4964).
-- pacman - fixed name resolution of URL packages (https://github.com/ansible-collections/community.general/pull/4959).
-- passwordstore lookup plugin - fix ``returnall`` for gopass (https://github.com/ansible-collections/community.general/pull/5027).
-- passwordstore lookup plugin - fix password store path detection for gopass (https://github.com/ansible-collections/community.general/pull/4955).
-- proxmox - fix error handling when getting VM by name when ``state=absent`` (https://github.com/ansible-collections/community.general/pull/4945).
-- proxmox_kvm - fix error handling when getting VM by name when ``state=absent`` (https://github.com/ansible-collections/community.general/pull/4945).
-- slack - fix incorrect channel prefix ``#`` caused by incomplete pattern detection by adding ``G0`` and ``GF`` as channel ID patterns (https://github.com/ansible-collections/community.general/pull/5019).
-- xfconf - fix setting of boolean values (https://github.com/ansible-collections/community.general/issues/4999, https://github.com/ansible-collections/community.general/pull/5007).
-
-New Plugins
------------
-
-Lookup
-~~~~~~
-
-- bitwarden - Retrieve secrets from Bitwarden
-
-New Modules
------------
-
-Remote Management
-~~~~~~~~~~~~~~~~~
-
-redfish
-^^^^^^^
-
-- wdc_redfish_command - Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs
-- wdc_redfish_info - Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs
-
-v5.3.0
-======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- machinectl become plugin - can now be used with a password from another user than root, if a polkit rule is present (https://github.com/ansible-collections/community.general/pull/4849).
-- opentelemetry callback plugin - allow configuring opentelementry callback via config file (https://github.com/ansible-collections/community.general/pull/4916).
-- redfish_info - add ``GetManagerInventory`` to report list of Manager inventory information (https://github.com/ansible-collections/community.general/issues/4899).
-
-Bugfixes
---------
-
-- cmd_runner module utils - fix bug caused by using the ``command`` variable instead of ``self.command`` when looking for binary path (https://github.com/ansible-collections/community.general/pull/4903).
-- dsv lookup plugin - do not ignore the ``tld`` parameter (https://github.com/ansible-collections/community.general/pull/4911).
-- lxd connection plugin - fix incorrect ``inventory_hostname`` in ``remote_addr``. This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/issues/4886).
-- proxmox inventory plugin - fix crash when ``enabled=1`` is used in agent config string (https://github.com/ansible-collections/community.general/pull/4910).
-- rax_clb_nodes - fix code to be compatible with Python 3 (https://github.com/ansible-collections/community.general/pull/4933).
-- redfish_info - fix to ``GetChassisPower`` to correctly report power information when multiple chassis exist, but not all chassis report power information (https://github.com/ansible-collections/community.general/issues/4901).
-
-v5.2.0
-======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- cmd_runner module utils - add ``__call__`` method to invoke context (https://github.com/ansible-collections/community.general/pull/4791).
-- passwordstore lookup plugin - allow using alternative password managers by detecting wrapper scripts, allow explicit configuration of pass and gopass backends (https://github.com/ansible-collections/community.general/issues/4766).
-- sudoers - will attempt to validate the proposed sudoers rule using visudo if available, optionally skipped, or required (https://github.com/ansible-collections/community.general/pull/4794, https://github.com/ansible-collections/community.general/issues/4745).
-
-Bugfixes
---------
-
-- Include ``PSF-license.txt`` file for ``plugins/module_utils/_mount.py``.
-- redfish_command - fix the check if a virtual media is unmounted to just check for ``instered= false`` caused by Supermicro hardware that does not clear the ``ImageName`` (https://github.com/ansible-collections/community.general/pull/4839).
-- redfish_command - the Supermicro Redfish implementation only supports the ``image_url`` parameter in the underlying API calls to ``VirtualMediaInsert`` and ``VirtualMediaEject``. Any values set (or the defaults) for ``write_protected`` or ``inserted`` will be ignored (https://github.com/ansible-collections/community.general/pull/4839).
-- sudoers - fix incorrect handling of ``state: absent`` (https://github.com/ansible-collections/community.general/issues/4852).
-
-New Modules
------------
-
-Cloud
-~~~~~
-
-scaleway
-^^^^^^^^
-
-- scaleway_compute_private_network - Scaleway compute - private network management
-
-System
-~~~~~~
-
-- keyring - Set or delete a passphrase using the Operating System's native keyring
-- keyring_info - Get a passphrase using the Operating System's native keyring
-
-v5.1.1
-======
-
-Release Summary
----------------
-
-Bugfix release.
-
-Bugfixes
---------
-
-- alternatives - do not set the priority if the priority was not set by the user (https://github.com/ansible-collections/community.general/pull/4810).
-- alternatives - only pass subcommands when they are specified as module arguments (https://github.com/ansible-collections/community.general/issues/4803, https://github.com/ansible-collections/community.general/issues/4804, https://github.com/ansible-collections/community.general/pull/4836).
-- alternatives - when ``subcommands`` is specified, ``link`` must be given for every subcommand. This was already mentioned in the documentation, but not enforced by the code (https://github.com/ansible-collections/community.general/pull/4836).
-- nmcli - fix error caused by adding undefined module arguments for list options (https://github.com/ansible-collections/community.general/issues/4373, https://github.com/ansible-collections/community.general/pull/4813).
-- proxmox inventory plugin - fixed extended status detection for qemu (https://github.com/ansible-collections/community.general/pull/4816).
-- redhat_subscription - fix unsubscribing on RHEL 9 (https://github.com/ansible-collections/community.general/issues/4741).
-- sudoers - ensure sudoers config files are created with the permissions requested by sudoers (0440) (https://github.com/ansible-collections/community.general/pull/4814).
-
-v5.1.0
-======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- ModuleHelper module utils - improved ``ModuleHelperException``, using ``to_native()`` for the exception message (https://github.com/ansible-collections/community.general/pull/4755).
-- alternatives - add ``state=absent`` to be able to remove an alternative (https://github.com/ansible-collections/community.general/pull/4654).
-- alternatives - add ``subcommands`` parameter (https://github.com/ansible-collections/community.general/pull/4654).
-- ansible_galaxy_install - minor refactoring using latest ``ModuleHelper`` updates (https://github.com/ansible-collections/community.general/pull/4752).
-- cmd_runner module util - added parameters ``check_mode_skip`` and ``check_mode_return`` to ``CmdRunner.context()``, so that the command is not executed when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/4736).
-- nmcli - adds ``vpn`` type and parameter for supporting VPN with service type L2TP and PPTP (https://github.com/ansible-collections/community.general/pull/4746).
-- proxmox inventory plugin - added new flag ``qemu_extended_statuses`` and new groups ``prelaunch``, ``paused``. They will be populated only when ``want_facts=true``, ``qemu_extended_statuses=true`` and only for ``QEMU`` machines (https://github.com/ansible-collections/community.general/pull/4723).
-- puppet - adds ``confdir`` parameter to configure a custom confir location (https://github.com/ansible-collections/community.general/pull/4740).
-- xfconf - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776).
-- xfconf module utils - created new module util ``xfconf`` providing a ``cmd_runner`` specific for ``xfconf`` modules (https://github.com/ansible-collections/community.general/pull/4776).
-- xfconf_info - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776).
-
-Deprecated Features
--------------------
-
-- cmd_runner module utils - deprecated ``fmt`` in favour of ``cmd_runner_fmt`` as the parameter format object (https://github.com/ansible-collections/community.general/pull/4777).
-
-New Modules
------------
-
-System
-~~~~~~
-
-- gconftool2_info - Retrieve GConf configurations
-
-v5.0.2
-======
-
-Release Summary
----------------
-
-Maintenance and bugfix release for Ansible 6.0.0.
-
-Bugfixes
---------
-
-- Include ``simplified_bsd.txt`` license file for various module utils, the ``lxca_common`` docs fragment, and the ``utm_utils`` unit tests.
-
-v5.0.1
-======
-
-Release Summary
----------------
-
-Regular bugfix release for inclusion in Ansible 6.0.0.
-
-Minor Changes
--------------
-
-- cpanm - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674).
-- mksysb - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674).
-- pipx - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674).
-- snap - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674).
-- xfconf - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674).
-
-Bugfixes
---------
-
-- consul - fixed bug introduced in PR 4590 (https://github.com/ansible-collections/community.general/issues/4680).
-- filesystem - handle ``fatresize --info`` output lines without ``:`` (https://github.com/ansible-collections/community.general/pull/4700).
-- filesystem - improve error messages when output cannot be parsed by including newlines in escaped form (https://github.com/ansible-collections/community.general/pull/4700).
-- keycloak_realm - fix default groups and roles (https://github.com/ansible-collections/community.general/issues/4241).
-- redis* modules - fix call to ``module.fail_json`` when failing because of missing Python libraries (https://github.com/ansible-collections/community.general/pull/4733).
-- xcc_redfish_command - for compatibility due to Redfish spec changes the virtualMedia resource location changed from Manager to System (https://github.com/ansible-collections/community.general/pull/4682).
-- zfs - fix wrong quoting of properties (https://github.com/ansible-collections/community.general/issues/4707, https://github.com/ansible-collections/community.general/pull/4726).
-
-v5.0.0
-======
-
-Release Summary
----------------
-
-This is release 5.0.0 of ``community.general``, released on 2022-05-17.
-
-Major Changes
--------------
-
-- The community.general collection no longer supports Ansible 2.9 and ansible-base 2.10. While we take no active measures to prevent usage, we will remove a lot of compatibility code and other compatility measures that will effectively prevent using most content from this collection with Ansible 2.9, and some content of this collection with ansible-base 2.10. Both Ansible 2.9 and ansible-base 2.10 will very soon be End of Life and if you are still using them, you should consider upgrading to ansible-core 2.11 or later as soon as possible (https://github.com/ansible-collections/community.general/pull/4548).
-
-Minor Changes
--------------
-
-- Avoid internal ansible-core module_utils in favor of equivalent public API available since at least Ansible 2.9. This fixes some instances added since the last time this was fixed (https://github.com/ansible-collections/community.general/pull/4232).
-- ModuleHelper module utils - ``ModuleHelperBase` now delegates the attributes ``check_mode``, ``get_bin_path``, ``warn``, and ``deprecate`` to the underlying ``AnsibleModule`` instance (https://github.com/ansible-collections/community.general/pull/4600).
-- ModuleHelper module utils - ``ModuleHelperBase`` now has a convenience method ``do_raise`` (https://github.com/ansible-collections/community.general/pull/4660).
-- Remove vendored copy of ``distutils.version`` in favor of vendored copy included with ansible-core 2.12+. For ansible-core 2.11, uses ``distutils.version`` for Python < 3.12. There is no support for ansible-core 2.11 with Python 3.12+ (https://github.com/ansible-collections/community.general/pull/3988).
-- aix_filesystem - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3833).
-- aix_lvg - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3834).
-- alternatives - add ``state`` parameter, which provides control over whether the alternative should be set as the active selection for its alternatives group (https://github.com/ansible-collections/community.general/issues/4543, https://github.com/ansible-collections/community.general/pull/4557).
-- ansible_galaxy_install - added option ``no_deps`` to the module (https://github.com/ansible-collections/community.general/issues/4174).
-- atomic_container - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- clc_alert_policy - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
-- clc_group - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
-- clc_loadbalancer - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
-- clc_server - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
-- cmd_runner module util - reusable command runner with consistent argument formatting and sensible defaults (https://github.com/ansible-collections/community.general/pull/4476).
-- cobbler inventory plugin - add ``include_profiles`` option (https://github.com/ansible-collections/community.general/pull/4068).
-- datadog_monitor - support new datadog event monitor of type `event-v2 alert` (https://github.com/ansible-collections/community.general/pull/4457)
-- filesystem - add support for resizing btrfs (https://github.com/ansible-collections/community.general/issues/4465).
-- gitlab - add more token authentication support with the new options ``api_oauth_token`` and ``api_job_token`` (https://github.com/ansible-collections/community.general/issues/705).
-- gitlab - clean up modules and utils (https://github.com/ansible-collections/community.general/pull/3694).
-- gitlab_group, gitlab_project - add new option ``avatar_path`` (https://github.com/ansible-collections/community.general/pull/3792).
-- gitlab_group_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/pull/4038 and https://github.com/ansible-collections/community.general/issues/4074).
-- gitlab_project - add new option ``default_branch`` to gitlab_project (if ``readme = true``) (https://github.com/ansible-collections/community.general/pull/3792).
-- gitlab_project_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/issues/4038).
-- hponcfg - revamped module using ModuleHelper (https://github.com/ansible-collections/community.general/pull/3840).
-- icinga2 inventory plugin - added the ``display_name`` field to variables (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
-- icinga2 inventory plugin - implemented constructed interface (https://github.com/ansible-collections/community.general/pull/4088).
-- icinga2 inventory plugin - inventory object names are changable using ``inventory_attr`` in your config file to the host object name, address, or display_name fields (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
-- ip_netns - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3822).
-- ipa_dnsrecord - add new argument ``record_values``, mutually exclusive to ``record_value``, which supports multiple values for one record (https://github.com/ansible-collections/community.general/pull/4578).
-- ipa_dnszone - ``dynamicupdate`` is now a boolean parameter, instead of a string parameter accepting ``"true"`` and ``"false"``. Also the module is now idempotent with respect to ``dynamicupdate`` (https://github.com/ansible-collections/community.general/pull/3374).
-- ipa_dnszone - add DNS zone synchronization support (https://github.com/ansible-collections/community.general/pull/3374).
-- ipa_service - add ``skip_host_check`` parameter. (https://github.com/ansible-collections/community.general/pull/4417).
-- ipmi_boot - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
-- ipmi_power - add ``machine`` option to ensure the power state via the remote target address (https://github.com/ansible-collections/community.general/pull/3968).
-- ipmi_power - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
-- iso_extract - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3805).
-- java_cert - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3835).
-- jira - add support for Bearer token auth (https://github.com/ansible-collections/community.general/pull/3838).
-- jira - when creating a comment, ``fields`` now is used for additional data (https://github.com/ansible-collections/community.general/pull/4304).
-- keycloak_* modules - added connection timeout parameter when calling server (https://github.com/ansible-collections/community.general/pull/4168).
-- keycloak_client - add ``always_display_in_console`` parameter (https://github.com/ansible-collections/community.general/issues/4390).
-- keycloak_client - add ``default_client_scopes`` and ``optional_client_scopes`` parameters. (https://github.com/ansible-collections/community.general/pull/4385).
-- keycloak_user_federation - add sssd user federation support (https://github.com/ansible-collections/community.general/issues/3767).
-- ldap_entry - add support for recursive deletion (https://github.com/ansible-collections/community.general/issues/3613).
-- linode inventory plugin - add support for caching inventory results (https://github.com/ansible-collections/community.general/pull/4179).
-- linode inventory plugin - allow templating of ``access_token`` variable in Linode inventory plugin (https://github.com/ansible-collections/community.general/pull/4040).
-- listen_ports_facts - add support for ``ss`` command besides ``netstat`` (https://github.com/ansible-collections/community.general/pull/3708).
-- lists_mergeby filter plugin - add parameters ``list_merge`` and ``recursive``. These are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/4058).
-- logentries - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3807).
-- logstash_plugin - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3808).
-- lxc_container - added ``wait_for_container`` parameter. If ``true`` the module will wait until the running task reports success as the status (https://github.com/ansible-collections/community.general/pull/4039).
-- lxc_container - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3851).
-- lxd connection plugin - make sure that ``ansible_lxd_host``, ``ansible_executable``, and ``ansible_lxd_executable`` work (https://github.com/ansible-collections/community.general/pull/3798).
-- lxd inventory plugin - support virtual machines (https://github.com/ansible-collections/community.general/pull/3519).
-- lxd_container - adds ``project`` option to allow selecting project for LXD instance (https://github.com/ansible-collections/community.general/pull/4479).
-- lxd_container - adds ``type`` option which also allows to operate on virtual machines and not just containers (https://github.com/ansible-collections/community.general/pull/3661).
-- lxd_profile - adds ``project`` option to allow selecting project for LXD profile (https://github.com/ansible-collections/community.general/pull/4479).
-- mail callback plugin - add ``Message-ID`` and ``Date`` headers (https://github.com/ansible-collections/community.general/issues/4055, https://github.com/ansible-collections/community.general/pull/4056).
-- mail callback plugin - properly use Ansible's option handling to split lists (https://github.com/ansible-collections/community.general/pull/4140).
-- mattermost - add the possibility to send attachments instead of text messages (https://github.com/ansible-collections/community.general/pull/3946).
-- mksysb - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3295).
-- module_helper module utils - added decorators ``check_mode_skip`` and ``check_mode_skip_returns`` for skipping methods when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/3849).
-- monit - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3821).
-- nmap inventory plugin - add ``sudo`` option in plugin in order to execute ``sudo nmap`` so that ``nmap`` runs with elevated privileges (https://github.com/ansible-collections/community.general/pull/4506).
-- nmcli - add ``wireguard`` connection type (https://github.com/ansible-collections/community.general/pull/3985).
-- nmcli - add missing connection aliases ``802-3-ethernet`` and ``802-11-wireless`` (https://github.com/ansible-collections/community.general/pull/4108).
-- nmcli - add multiple addresses support for ``ip4`` parameter (https://github.com/ansible-collections/community.general/issues/1088, https://github.com/ansible-collections/community.general/pull/3738).
-- nmcli - add multiple addresses support for ``ip6`` parameter (https://github.com/ansible-collections/community.general/issues/1088).
-- nmcli - add support for ``eui64`` and ``ipv6privacy`` parameters (https://github.com/ansible-collections/community.general/issues/3357).
-- nmcli - adds ``routes6`` and ``route_metric6`` parameters for supporting IPv6 routes (https://github.com/ansible-collections/community.general/issues/4059).
-- nmcli - remove nmcli modify dependency on ``type`` parameter (https://github.com/ansible-collections/community.general/issues/2858).
-- nomad_job - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- nomad_job_info - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- npm - add ability to use ``production`` flag when ``ci`` is set (https://github.com/ansible-collections/community.general/pull/4299).
-- open_iscsi - extended module to allow rescanning of established session for one or all targets (https://github.com/ansible-collections/community.general/issues/3763).
-- opennebula - add the release action for VMs in the ``HOLD`` state (https://github.com/ansible-collections/community.general/pull/4036).
-- opentelemetry_plugin - enrich service when using the ``docker_login`` (https://github.com/ansible-collections/community.general/pull/4104).
-- opentelemetry_plugin - enrich service when using the ``jenkins``, ``hetzner`` or ``jira`` modules (https://github.com/ansible-collections/community.general/pull/4105).
-- packet_device - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- packet_sshkey - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- packet_volume - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- pacman - add ``remove_nosave`` parameter to avoid saving modified configuration files as ``.pacsave`` files. (https://github.com/ansible-collections/community.general/pull/4316, https://github.com/ansible-collections/community.general/issues/4315).
-- pacman - add ``stdout`` and ``stderr`` as return values (https://github.com/ansible-collections/community.general/pull/3758).
-- pacman - now implements proper change detection for ``update_cache=true``. Adds ``cache_updated`` return value to when ``update_cache=true`` to report this result independently of the module's overall changed return value (https://github.com/ansible-collections/community.general/pull/4337).
-- pacman - the module has been rewritten and is now much faster when using ``state=latest``. Operations are now done on all packages at once instead of package per package, and the configured output format of ``pacman`` no longer affects the module's operation. (https://github.com/ansible-collections/community.general/pull/3907, https://github.com/ansible-collections/community.general/issues/3783, https://github.com/ansible-collections/community.general/issues/4079)
-- passwordstore lookup plugin - add configurable ``lock`` and ``locktimeout`` options to avoid race conditions in itself and in the ``pass`` utility it calls. By default, the plugin now locks on write operations (https://github.com/ansible-collections/community.general/pull/4194).
-- pipx - added options ``editable`` and ``pip_args`` (https://github.com/ansible-collections/community.general/issues/4300).
-- pritunl_user - add ``mac_addresses`` parameter (https://github.com/ansible-collections/community.general/pull/4535).
-- profitbricks - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- proxmox - add ``clone`` parameter (https://github.com/ansible-collections/community.general/pull/3930).
-- proxmox - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- proxmox inventory plugin - add support for client-side jinja filters (https://github.com/ansible-collections/community.general/issues/3553).
-- proxmox inventory plugin - add support for templating the ``url``, ``user``, and ``password`` options (https://github.com/ansible-collections/community.general/pull/4418).
-- proxmox inventory plugin - add token authentication as an alternative to username/password (https://github.com/ansible-collections/community.general/pull/4540).
-- proxmox inventory plugin - parse LXC configs returned by the proxmox API (https://github.com/ansible-collections/community.general/pull/4472).
-- proxmox modules - move ``HAS_PROXMOXER`` check into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4030).
-- proxmox modules - move common code into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4029).
-- proxmox_kvm - added EFI disk support when creating VM with OVMF UEFI BIOS with new ``efidisk0`` option (https://github.com/ansible-collections/community.general/pull/4106, https://github.com/ansible-collections/community.general/issues/1638).
-- proxmox_kvm - add ``win11`` to ``ostype`` parameter for Windows 11 and Windows Server 2022 support (https://github.com/ansible-collections/community.general/issues/4023, https://github.com/ansible-collections/community.general/pull/4191).
-- proxmox_snap - add restore snapshot option (https://github.com/ansible-collections/community.general/pull/4377).
-- proxmox_snap - fixed timeout value to correctly reflect time in seconds. The timeout was off by one second (https://github.com/ansible-collections/community.general/pull/4377).
-- puppet - remove deprecation for ``show_diff`` parameter. Its alias ``show-diff`` is still deprecated and will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/3980).
-- python_requirements_info - returns python version broken down into its components, and some minor refactoring (https://github.com/ansible-collections/community.general/pull/3797).
-- rax_files_objects - minor refactoring improving code quality (https://github.com/ansible-collections/community.general/pull/4649).
-- redfish_* modules - the contents of ``@Message.ExtendedInfo`` will be returned as a string in the event that ``@Message.ExtendedInfo.Messages`` does not exist. This is likely more useful than the standard HTTP error (https://github.com/ansible-collections/community.general/pull/4596).
-- redfish_command - add ``GetHostInterfaces`` command to enable reporting Redfish Host Interface information (https://github.com/ansible-collections/community.general/issues/3693).
-- redfish_command - add ``IndicatorLedOn``, ``IndicatorLedOff``, and ``IndicatorLedBlink`` commands to the Systems category for controlling system LEDs (https://github.com/ansible-collections/community.general/issues/4084).
-- redfish_command - add ``SetHostInterface`` command to enable configuring the Redfish Host Interface (https://github.com/ansible-collections/community.general/issues/3632).
-- redis - add authentication parameters ``login_user``, ``tls``, ``validate_certs``, and ``ca_certs`` (https://github.com/ansible-collections/community.general/pull/4207).
-- scaleway inventory plugin - add profile parameter ``scw_profile`` (https://github.com/ansible-collections/community.general/pull/4049).
-- scaleway_compute - add possibility to use project identifier (new ``project`` option) instead of deprecated organization identifier (https://github.com/ansible-collections/community.general/pull/3951).
-- scaleway_volume - all volumes are systematically created on par1 (https://github.com/ansible-collections/community.general/pull/3964).
-- seport - minor refactoring (https://github.com/ansible-collections/community.general/pull/4471).
-- smartos_image_info - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- snap - add option ``options`` permitting to set options using the ``snap set`` command (https://github.com/ansible-collections/community.general/pull/3943).
-- sudoers - add support for ``runas`` parameter (https://github.com/ansible-collections/community.general/issues/4379).
-- svc - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3829).
-- syslog_json - add option to skip logging of ``gather_facts`` playbook tasks; use v2 callback API (https://github.com/ansible-collections/community.general/pull/4223).
-- terraform - adds ``terraform_upgrade`` parameter which allows ``terraform init`` to satisfy new provider constraints in an existing Terraform project (https://github.com/ansible-collections/community.general/issues/4333).
-- to_time_unit filter plugins - the time filters have been extended to also allow ``0`` as input (https://github.com/ansible-collections/community.general/pull/4612).
-- udm_group - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
-- udm_share - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
-- vmadm - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- vmadm - minor refactoring and improvement on the module (https://github.com/ansible-collections/community.general/pull/4581).
-- vmadm - minor refactoring and improvement on the module (https://github.com/ansible-collections/community.general/pull/4648).
-- webfaction_app - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- webfaction_db - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
-- xattr - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3806).
-- xfconf - added missing value types ``char``, ``uchar``, ``int64`` and ``uint64`` (https://github.com/ansible-collections/community.general/pull/4534).
-- xfconf - minor refactor on the base class for the module (https://github.com/ansible-collections/community.general/pull/3919).
-- zfs - minor refactoring in the code (https://github.com/ansible-collections/community.general/pull/4650).
-- zypper - add support for ``--clean-deps`` option to remove packages that depend on a package being removed (https://github.com/ansible-collections/community.general/pull/4195).
-
-Breaking Changes / Porting Guide
---------------------------------
-
-- Parts of this collection do not work with ansible-core 2.11 on Python 3.12+. Please either upgrade to ansible-core 2.12+, or use Python 3.11 or earlier (https://github.com/ansible-collections/community.general/pull/3988).
-- The symbolic links used to implement flatmapping for all modules were removed and replaced by ``meta/runtime.yml`` redirects. This effectively breaks compatibility with Ansible 2.9 for all modules (without using their "long" names, which is discouraged and which can change without previous notice since they are considered an implementation detail) (https://github.com/ansible-collections/community.general/pull/4548).
-- a_module test plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
-- archive - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
-- git_config - remove Ansible 2.9 and early ansible-base 2.10 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
-- java_keystore - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
-- lists_mergeby and groupby_as_dict filter plugins - adjust filter plugin filename. This change is not visible to end-users, it only affects possible other collections importing Python paths (https://github.com/ansible-collections/community.general/pull/4625).
-- lists_mergeby filter plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
-- maven_artifact - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
-- memcached cache plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
-- path_join filter plugin shim - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
-- redis cache plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
-- yarn - remove unsupported and unnecessary ``--no-emoji`` flag (https://github.com/ansible-collections/community.general/pull/4662).
-
-Deprecated Features
--------------------
-
-- ansible_galaxy_install - deprecated support for ``ansible`` 2.9 and ``ansible-base`` 2.10 (https://github.com/ansible-collections/community.general/pull/4601).
-- dig lookup plugin - the ``DLV`` record type has been decommissioned in 2017 and support for it will be removed from community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4618).
-- gem - the default of the ``norc`` option has been deprecated and will change to ``true`` in community.general 6.0.0. Explicitly specify a value to avoid a deprecation warning (https://github.com/ansible-collections/community.general/pull/4517).
-- mail callback plugin - not specifying ``sender`` is deprecated and will be disallowed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4140).
-- module_helper module utils - deprecated the attribute ``ModuleHelper.VarDict`` (https://github.com/ansible-collections/community.general/pull/3801).
-- nmcli - deprecate default hairpin mode for a bridge. This so we can change it to ``false`` in community.general 7.0.0, as this is also the default in ``nmcli`` (https://github.com/ansible-collections/community.general/pull/4334).
-- pacman - from community.general 5.0.0 on, the ``changed`` status of ``update_cache`` will no longer be ignored if ``name`` or ``upgrade`` is specified. To keep the old behavior, add something like ``register: result`` and ``changed_when: result.packages | length > 0`` to your task (https://github.com/ansible-collections/community.general/pull/4329).
-- proxmox inventory plugin - the current default ``true`` of the ``want_proxmox_nodes_ansible_host`` option has been deprecated. The default will change to ``false`` in community.general 6.0.0. To keep the current behavior, explicitly set ``want_proxmox_nodes_ansible_host`` to ``true`` in your inventory configuration. We suggest to already switch to the new behavior by explicitly setting it to ``false``, and by using ``compose:`` to set ``ansible_host`` to the correct value. See the examples in the plugin documentation for details (https://github.com/ansible-collections/community.general/pull/4466).
-- vmadm - deprecated module parameter ``debug`` that was not used anywhere (https://github.com/ansible-collections/community.general/pull/4580).
-
-Removed Features (previously deprecated)
-----------------------------------------
-
-- ali_instance_info - removed the options ``availability_zone``, ``instance_ids``, and ``instance_names``. Use filter item ``zone_id`` instead of ``availability_zone``, filter item ``instance_ids`` instead of ``instance_ids``, and filter item ``instance_name`` instead of ``instance_names`` (https://github.com/ansible-collections/community.general/pull/4516).
-- apt_rpm - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516).
-- compose - removed various deprecated aliases. Use the version with ``_`` instead of ``-`` instead (https://github.com/ansible-collections/community.general/pull/4516).
-- dnsimple - remove support for dnsimple < 2.0.0 (https://github.com/ansible-collections/community.general/pull/4516).
-- github_deploy_key - removed the deprecated alias ``2fa_token`` of ``otp`` (https://github.com/ansible-collections/community.general/pull/4516).
-- homebrew, homebrew_cask - removed the deprecated alias ``update-brew`` of ``update_brew`` (https://github.com/ansible-collections/community.general/pull/4516).
-- linode - removed the ``backupsenabled`` option. Use ``backupweeklyday`` or ``backupwindow`` to enable backups (https://github.com/ansible-collections/community.general/pull/4516).
-- opkg - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516).
-- pacman - if ``update_cache=true`` is used with ``name`` or ``upgrade``, the changed state will now also indicate if only the cache was updated. To keep the old behavior - only indicate ``changed`` when a package was installed/upgraded -, use ``changed_when`` as indicated in the module examples (https://github.com/ansible-collections/community.general/pull/4516).
-- pacman - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516).
-- proxmox, proxmox_kvm, proxmox_snap - no longer allow to specify a VM name that matches multiple VMs. If this happens, the modules now fail (https://github.com/ansible-collections/community.general/pull/4516).
-- serverless - removed the ``functions`` option. It was not used by the module (https://github.com/ansible-collections/community.general/pull/4516).
-- slackpkg - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516).
-- urpmi - removed the deprecated alias ``no-recommends`` of ``no_recommends`` (https://github.com/ansible-collections/community.general/pull/4516).
-- urpmi - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516).
-- xbps - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516).
-- xfconf - the ``get`` state has been removed. Use the ``xfconf_info`` module instead (https://github.com/ansible-collections/community.general/pull/4516).
-
-Bugfixes
---------
-
-- Various modules and plugins - use vendored version of ``distutils.version`` instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.general/pull/3936).
-- a_module test plugin - fix crash when testing a module name that was tombstoned (https://github.com/ansible-collections/community.general/pull/3660).
-- alternatives - fix output parsing for alternatives groups (https://github.com/ansible-collections/community.general/pull/3976).
-- cargo - fix detection of outdated packages when ``state=latest`` (https://github.com/ansible-collections/community.general/pull/4052).
-- cargo - fix incorrectly reported changed status for packages with a name containing a hyphen (https://github.com/ansible-collections/community.general/issues/4044, https://github.com/ansible-collections/community.general/pull/4052).
-- consul - fixed bug where class ``ConsulService`` was overwriting the field ``checks``, preventing the addition of checks to a service (https://github.com/ansible-collections/community.general/pull/4590).
-- counter_enabled callback plugin - fix output to correctly display host and task counters in serial mode (https://github.com/ansible-collections/community.general/pull/3709).
-- dconf - skip processes that disappeared while we inspected them (https://github.com/ansible-collections/community.general/issues/4151).
-- dnsmadeeasy - fix failure on deleting DNS entries when API response does not contain monitor value (https://github.com/ansible-collections/community.general/issues/3620).
-- dsv lookup plugin - raise an Ansible error if the wrong ``python-dsv-sdk`` version is installed (https://github.com/ansible-collections/community.general/pull/4422).
-- filesize - add support for busybox dd implementation, that is used by default on Alpine linux (https://github.com/ansible-collections/community.general/pull/4288, https://github.com/ansible-collections/community.general/issues/4259).
-- gconftool2 - properly escape values when passing them to ``gconftool-2`` (https://github.com/ansible-collections/community.general/pull/4647).
-- git_branch - remove deprecated and unnecessary branch ``unprotect`` method (https://github.com/ansible-collections/community.general/pull/4496).
-- github_repo - ``private`` and ``description`` attributes should not be set to default values when the repo already exists (https://github.com/ansible-collections/community.general/pull/2386).
-- gitlab_group - improve searching for projects inside group on deletion (https://github.com/ansible-collections/community.general/pull/4491).
-- gitlab_group_members - handle more than 20 groups when finding a group (https://github.com/ansible-collections/community.general/pull/4491, https://github.com/ansible-collections/community.general/issues/4460, https://github.com/ansible-collections/community.general/issues/3729).
-- gitlab_group_variable - add missing documentation about GitLab versions that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/pull/4038).
-- gitlab_group_variable - allow to set same variable name under different environment scopes. Due to this change, the return value ``group_variable`` differs from previous version in check mode. It was counting ``updated`` values, because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/pull/4038).
-- gitlab_group_variable - fix idempotent change behaviour for float and integer variables (https://github.com/ansible-collections/community.general/pull/4038).
-- gitlab_hook - avoid errors during idempotency check when an attribute does not exist (https://github.com/ansible-collections/community.general/pull/4668).
-- gitlab_hook - handle more than 20 hooks when finding a hook (https://github.com/ansible-collections/community.general/pull/4491).
-- gitlab_project - handle more than 20 namespaces when finding a namespace (https://github.com/ansible-collections/community.general/pull/4491).
-- gitlab_project_members - handle more than 20 projects and users when finding a project resp. user (https://github.com/ansible-collections/community.general/pull/4491).
-- gitlab_project_variable - ``value`` is not necessary when deleting variables (https://github.com/ansible-collections/community.general/pull/4150).
-- gitlab_project_variable - add missing documentation about GitLab versions that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/issues/4038).
-- gitlab_project_variable - allow to set same variable name under different environment scopes. Due to this change, the return value ``project_variable`` differs from previous version in check mode. It was counting ``updated`` values, because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/issues/4038).
-- gitlab_project_variable - fix idempotent change behaviour for float and integer variables (https://github.com/ansible-collections/community.general/issues/4038).
-- gitlab_runner - make ``project`` and ``owned`` mutually exclusive (https://github.com/ansible-collections/community.general/pull/4136).
-- gitlab_runner - use correct API endpoint to create and retrieve project level runners when using ``project`` (https://github.com/ansible-collections/community.general/pull/3965).
-- gitlab_user - handle more than 20 users and SSH keys when finding a user resp. SSH key (https://github.com/ansible-collections/community.general/pull/4491).
-- homebrew_cask - fix force install operation (https://github.com/ansible-collections/community.general/issues/3703).
-- icinga2 inventory plugin - handle 404 error when filter produces no results (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
-- imc_rest - fixes the module failure due to the usage of ``itertools.izip_longest`` which is not available in Python 3 (https://github.com/ansible-collections/community.general/issues/4206).
-- ini_file - when removing nothing do not report changed (https://github.com/ansible-collections/community.general/issues/4154).
-- interfaces_file - fixed the check for existing option in interface (https://github.com/ansible-collections/community.general/issues/3841).
-- jail connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
-- jira - fixed bug where module returns error related to dictionary key ``body`` (https://github.com/ansible-collections/community.general/issues/3419).
-- keycloak - fix parameters types for ``defaultDefaultClientScopes`` and ``defaultOptionalClientScopes`` from list of dictionaries to list of strings (https://github.com/ansible-collections/community.general/pull/4526).
-- keycloak_* - the documented ``validate_certs`` parameter was not taken into account when calling the ``open_url`` function in some cases, thus enforcing certificate validation even when ``validate_certs`` was set to ``false``. (https://github.com/ansible-collections/community.general/pull/4382)
-- keycloak_user_federation - creating a user federation while specifying an ID (that does not exist yet) no longer fails with a 404 Not Found (https://github.com/ansible-collections/community.general/pull/4212).
-- keycloak_user_federation - mappers auto-created by keycloak are matched and merged by their name and no longer create duplicated entries (https://github.com/ansible-collections/community.general/pull/4212).
-- ldap_search - allow it to be used even in check mode (https://github.com/ansible-collections/community.general/issues/3619).
-- linode inventory plugin - fix configuration handling relating to inventory filtering (https://github.com/ansible-collections/community.general/pull/4336).
-- listen_ports_facts - local port regex was not handling well IPv6 only binding. Fixes the regex for ``ss`` (https://github.com/ansible-collections/community.general/pull/4092).
-- lvol - allows logical volumes to be created with certain size arguments prefixed with ``+`` to preserve behavior of older versions of this module (https://github.com/ansible-collections/community.general/issues/3665).
-- lxd connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``lxc`` executable (https://github.com/ansible-collections/community.general/pull/3934).
-- lxd inventory plugin - do not crash if OS and release metadata are not present
- (https://github.com/ansible-collections/community.general/pull/4351).
-- mail callback plugin - fix crash on Python 3 (https://github.com/ansible-collections/community.general/issues/4025, https://github.com/ansible-collections/community.general/pull/4026).
-- mail callback plugin - fix encoding of the name of sender and recipient (https://github.com/ansible-collections/community.general/issues/4060, https://github.com/ansible-collections/community.general/pull/4061).
-- mksysb - fixed bug where parameter ``backup_dmapi_fs`` was passing the wrong CLI argument (https://github.com/ansible-collections/community.general/pull/3295).
-- nmcli - fix returning "changed" when no mask set for IPv4 or IPv6 addresses on task rerun (https://github.com/ansible-collections/community.general/issues/3768).
-- nmcli - fix returning "changed" when routes parameters set, also suggest new routes4 and routes6 format (https://github.com/ansible-collections/community.general/issues/4131).
-- nmcli - fixed falsely reported changed status when ``mtu`` is omitted with ``dummy`` connections (https://github.com/ansible-collections/community.general/issues/3612, https://github.com/ansible-collections/community.general/pull/3625).
-- nmcli - pass ``flags``, ``ingress``, ``egress`` params to ``nmcli`` (https://github.com/ansible-collections/community.general/issues/1086).
-- nrdp callback plugin - fix error ``string arguments without an encoding`` (https://github.com/ansible-collections/community.general/issues/3903).
-- onepassword - search all valid configuration locations and use the first found (https://github.com/ansible-collections/community.general/pull/4640).
-- opennebula inventory plugin - complete the implementation of ``constructable`` for opennebula inventory plugin. Now ``keyed_groups``, ``compose``, ``groups`` actually work (https://github.com/ansible-collections/community.general/issues/4497).
-- opentelemetry - fix generating a trace with a task containing ``no_log: true`` (https://github.com/ansible-collections/community.general/pull/4043).
-- opentelemetry callback plugin - fix task message attribute that is reported failed regardless of the task result (https://github.com/ansible-collections/community.general/pull/4624).
-- opentelemetry callback plugin - fix warning for the include_tasks (https://github.com/ansible-collections/community.general/pull/4623).
-- opentelemetry_plugin - honour ``ignore_errors`` when a task has failed instead of reporting an error (https://github.com/ansible-collections/community.general/pull/3837).
-- pacman - Use ``--groups`` instead of ``--group`` (https://github.com/ansible-collections/community.general/pull/4312).
-- pacman - fix URL based package installation (https://github.com/ansible-collections/community.general/pull/4286, https://github.com/ansible-collections/community.general/issues/4285).
-- pacman - fix ``upgrade=yes`` (https://github.com/ansible-collections/community.general/pull/4275, https://github.com/ansible-collections/community.general/issues/4274).
-- pacman - fixed bug where ``absent`` state did not work for locally installed packages (https://github.com/ansible-collections/community.general/pull/4464).
-- pacman - make sure that ``packages`` is always returned when ``name`` or ``upgrade`` is specified, also if nothing is done (https://github.com/ansible-collections/community.general/pull/4329).
-- pacman - when the ``update_cache`` option is combined with another option such as ``upgrade``, report ``changed`` based on the actions performed by the latter option. This was the behavior in community.general 4.4.0 and before. In community.general 4.5.0, a task combining these options would always report ``changed`` (https://github.com/ansible-collections/community.general/pull/4318).
-- passwordstore lookup plugin - fix error detection for non-English locales (https://github.com/ansible-collections/community.general/pull/4219).
-- passwordstore lookup plugin - prevent returning path names as passwords by accident (https://github.com/ansible-collections/community.general/issues/4185, https://github.com/ansible-collections/community.general/pull/4192).
-- passwordstore lookup plugin - replace deprecated ``distutils.util.strtobool`` with Ansible's ``convert_bool.boolean`` to interpret values for the ``create``, ``returnall``, ``overwrite``, 'backup``, and ``nosymbols`` options (https://github.com/ansible-collections/community.general/pull/3934).
-- pipx - passes the correct command line option ``--include-apps`` (https://github.com/ansible-collections/community.general/issues/3791).
-- pritunl - fixed bug where pritunl plugin api add unneeded data in ``auth_string`` parameter (https://github.com/ansible-collections/community.general/issues/4527).
-- proxmox - fixed ``onboot`` parameter causing module failures when undefined (https://github.com/ansible-collections/community.general/issues/3844).
-- proxmox inventory plugin - always convert strings that follow the ``key=value[,key=value[...]]`` form into dictionaries (https://github.com/ansible-collections/community.general/pull/4349).
-- proxmox inventory plugin - fix error when parsing container with LXC configs (https://github.com/ansible-collections/community.general/issues/4472, https://github.com/ansible-collections/community.general/pull/4472).
-- proxmox inventory plugin - fixed the ``description`` field being ignored if it contained a comma (https://github.com/ansible-collections/community.general/issues/4348).
-- proxmox inventory plugin - fixed the ``tags_parsed`` field when Proxmox returns a single space for the ``tags`` entry (https://github.com/ansible-collections/community.general/pull/4378).
-- proxmox_kvm - fix a bug when getting a state of VM without name will fail (https://github.com/ansible-collections/community.general/pull/4508).
-- proxmox_kvm - fix error in check when creating or cloning (https://github.com/ansible-collections/community.general/pull/4306).
-- proxmox_kvm - fix error when checking whether Proxmox VM exists (https://github.com/ansible-collections/community.general/pull/4287).
-- python_requirements_info - fails if version operator used without version (https://github.com/ansible-collections/community.general/pull/3785).
-- python_requirements_info - store ``mismatched`` return values per package as documented in the module (https://github.com/ansible-collections/community.general/pull/4078).
-- redfish_command - the iLO4 Redfish implementation only supports the ``image_url`` parameter in the underlying API calls to ``VirtualMediaInsert`` and ``VirtualMediaEject``. Any values set (or the defaults) for ``write_protected`` or ``inserted`` will be ignored (https://github.com/ansible-collections/community.general/pull/4596).
-- say callback plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``say`` resp. ``espeak`` executables (https://github.com/ansible-collections/community.general/pull/3934).
-- scaleway_user_data - fix double-quote added where no double-quote is needed to user data in scaleway's server (``Content-type`` -> ``Content-Type``) (https://github.com/ansible-collections/community.general/pull/3940).
-- slack - add ``charset`` to HTTP headers to avoid Slack API warning (https://github.com/ansible-collections/community.general/issues/3932).
-- terraform - fix command options being ignored during planned/plan in function ``build_plan`` such as ``lock`` or ``lock_timeout`` (https://github.com/ansible-collections/community.general/issues/3707, https://github.com/ansible-collections/community.general/pull/3726).
-- terraform - fix list initialization to support both Python 2 and Python 3 (https://github.com/ansible-collections/community.general/issues/4531).
-- vdo - fix options error (https://github.com/ansible-collections/community.general/pull/4163).
-- xattr - fix exception caused by ``_run_xattr()`` raising a ``ValueError`` due to a mishandling of base64-encoded value (https://github.com/ansible-collections/community.general/issues/3673).
-- xbps - fix error message that is reported when installing packages fails (https://github.com/ansible-collections/community.general/pull/4438).
-- yarn - fix incorrect handling of ``yarn list`` and ``yarn global list`` output that could result in fatal error (https://github.com/ansible-collections/community.general/pull/4050).
-- yarn - fix incorrectly reported status when installing a package globally (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4050).
-- yarn - fix missing ``~`` expansion in yarn global install folder which resulted in incorrect task status (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4048).
-- yum_versionlock - fix matching of existing entries with names passed to the module. Match yum and dnf lock format (https://github.com/ansible-collections/community.general/pull/4183).
-- zone connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
-- zypper - fix undefined variable when running in check mode (https://github.com/ansible-collections/community.general/pull/4667).
-- zypper - fixed bug that caused zypper to always report [ok] and do nothing on ``state=present`` when all packages in ``name`` had a version specification (https://github.com/ansible-collections/community.general/issues/4371, https://github.com/ansible-collections/community.general/pull/4421).
-
-Known Issues
-------------
-
-- pacman - ``update_cache`` cannot differentiate between up to date and outdated package lists and will report ``changed`` in both situations (https://github.com/ansible-collections/community.general/pull/4318).
-- pacman - binaries specified in the ``executable`` parameter must support ``--print-format`` in order to be used by this module. In particular, AUR helper ``yay`` is known not to currently support it (https://github.com/ansible-collections/community.general/pull/4312).
-
-New Plugins
------------
-
-Filter
-~~~~~~
-
-- counter - Counts hashable elements in a sequence
+This file is a placeholder; a version-specific ``CHANGELOG-vX.rst`` will be generated during releases from fragments
+under ``changelogs/fragments``. On release branches once a release has been created, consult the branch's version-specific
+file for changes that have occurred in that branch.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ae10c4afc4..94c5299069 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -31,7 +31,9 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which
* Try committing your changes with an informative but short commit message.
* Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout.
-* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
+* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/collection_development_process.html#creating-a-changelog-fragment).
+ * You must not include a fragment for new modules or new plugins. Also you shouldn't include one for docs-only changes. (If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) )
+ * Please always include a link to the pull request itself, and if the PR is about an issue, also a link to the issue. Also make sure the fragment ends with a period, and begins with a lower-case letter after `-`. (Again, if you don't do this, we'll add suggestions to fix it, so don't worry too much :) )
* Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed.
You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
@@ -42,7 +44,49 @@ If you want to test a PR locally, refer to [our testing guide](https://github.co
If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it.
-## Run sanity, unit or integration tests locally
+## Run sanity or unit locally (with antsibull-nox)
+
+The easiest way to run sanity and unit tests locally is to use [antsibull-nox](https://ansible.readthedocs.io/projects/antsibull-nox/).
+(If you have [nox](https://nox.thea.codes/en/stable/) installed, it will automatically install antsibull-nox in a virtual environment for you.)
+
+### Sanity tests
+
+The following commands show how to run ansible-test sanity tests:
+
+```.bash
+# Run basic sanity tests for all files in the collection:
+nox -Re ansible-test-sanity-devel
+
+# Run basic sanity tests for the given files and directories:
+nox -Re ansible-test-sanity-devel -- plugins/modules/system/pids.py tests/integration/targets/pids/
+
+# Run all other sanity tests for all files in the collection:
+nox -R
+```
+
+If you replace `-Re` with `-e`, or leave `-R` away, then the virtual environments will be re-created. The `-R` re-uses them (if they already exist).
+
+### Unit tests
+
+The following commands show how to run unit tests:
+
+```.bash
+# Run all unit tests:
+nox -Re ansible-test-units-devel
+
+# Run all unit tests for one Python version (a lot faster):
+nox -Re ansible-test-units-devel -- --python 3.13
+
+# Run a specific unit test (for the nmcli module) for one Python version:
+nox -Re ansible-test-units-devel -- --python 3.13 tests/unit/plugins/modules/net_tools/test_nmcli.py
+```
+
+If you replace `-Re` with `-e`, then the virtual environments will be re-created. The `-R` re-uses them (if they already exist).
+
+## Run basic sanity, unit or integration tests locally (with ansible-test)
+
+Instead of using antsibull-nox, you can also run sanity and unit tests with ansible-test directly.
+This also allows you to run integration tests.
You have to check out the repository into a specific path structure to be able to run `ansible-test`. The path to the git checkout must end with `.../ansible_collections/community/general`. Please see [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to check out the repository into a correct path structure. The short version of these instructions is:
@@ -54,16 +98,27 @@ cd ~/dev/ansible_collections/community/general
Then you can run `ansible-test` (which is a part of [ansible-core](https://pypi.org/project/ansible-core/)) inside the checkout. The following example commands expect that you have installed Docker or Podman. Note that Podman has only been supported by more recent ansible-core releases. If you are using Docker, the following will work with Ansible 2.9+.
-The following commands show how to run sanity tests:
+### Basic sanity tests
+
+The following commands show how to run basic sanity tests:
```.bash
-# Run sanity tests for all files in the collection:
+# Run basic sanity tests for all files in the collection:
ansible-test sanity --docker -v
-# Run sanity tests for the given files and directories:
+# Run basic sanity tests for the given files and directories:
ansible-test sanity --docker -v plugins/modules/system/pids.py tests/integration/targets/pids/
```
+### Unit tests
+
+Note that for running unit tests, you need to install required collections in the same folder structure that `community.general` is checked out in.
+Right now, you need to install [`community.internal_test_tools`](https://github.com/ansible-collections/community.internal_test_tools).
+If you want to use the latest version from GitHub, you can run:
+```
+git clone https://github.com/ansible-collections/community.internal_test_tools.git ~/dev/ansible_collections/community/internal_test_tools
+```
+
The following commands show how to run unit tests:
```.bash
@@ -77,13 +132,42 @@ ansible-test units --docker -v --python 3.8
ansible-test units --docker -v --python 3.8 tests/unit/plugins/modules/net_tools/test_nmcli.py
```
+### Integration tests
+
+Note that for running integration tests, you need to install required collections in the same folder structure that `community.general` is checked out in.
+Right now, depending on the test, you need to install [`ansible.posix`](https://github.com/ansible-collections/ansible.posix), [`community.crypto`](https://github.com/ansible-collections/community.crypto), and [`community.docker`](https://github.com/ansible-collections/community.docker).
+If you want to use the latest versions from GitHub, you can run:
+```
+mkdir -p ~/dev/ansible_collections/ansible
+git clone https://github.com/ansible-collections/ansible.posix.git ~/dev/ansible_collections/ansible/posix
+git clone https://github.com/ansible-collections/community.crypto.git ~/dev/ansible_collections/community/crypto
+git clone https://github.com/ansible-collections/community.docker.git ~/dev/ansible_collections/community/docker
+```
+
The following commands show how to run integration tests:
-```.bash
-# Run integration tests for the interfaces_files module in a Docker container using the
-# fedora35 operating system image (the supported images depend on your ansible-core version):
-ansible-test integration --docker fedora35 -v interfaces_file
+#### In Docker
+Integration tests on Docker have the following parameters:
+- `image_name` (required): The name of the Docker image. To get the list of supported Docker images, run
+ `ansible-test integration --help` and look for _target docker images_.
+- `test_name` (optional): The name of the integration test.
+ For modules, this equals the short name of the module; for example, `pacman` in case of `community.general.pacman`.
+ For plugins, the plugin type is added before the plugin's short name, for example `callback_yaml` for the `community.general.yaml` callback.
+```.bash
+# Test all plugins/modules on fedora40
+ansible-test integration -v --docker fedora40
+
+# Template
+ansible-test integration -v --docker image_name test_name
+
+# Example community.general.ini_file module on fedora40 Docker image:
+ansible-test integration -v --docker fedora40 ini_file
+```
+
+#### Without isolation
+
+```.bash
# Run integration tests for the flattened lookup **without any isolation**:
ansible-test integration -v lookup_flattened
```
@@ -112,38 +196,12 @@ Creating new modules and plugins requires a bit more work than other Pull Reques
- Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests
which run in CI.
-4. For modules and action plugins, make sure to create your module/plugin in the correct subdirectory, and add a redirect entry
- in `meta/runtime.yml`. For example, for the `aerospike_migrations` module located in
- `plugins/modules/database/aerospike/aerospike_migrations.py`, you need to create the following entry:
- ```.yaml
- aerospike_migrations:
- redirect: community.general.database.aerospike.aerospike_migrations
- ```
- Here, the relative path `database/aerospike/` is inserted into the module's FQCN (Fully Qualified Collection Name) after the
- collection's name and before the module's name. This must not be done for other plugin types but modules and action plugins!
-
- - Action plugins need to be accompanied by a module, even if the module file only contains documentation
- (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
- than the action plugin has in `plugins/action/`.
+4. Action plugins need to be accompanied by a module, even if the module file only contains documentation
+ (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
+ as the action plugin has in `plugins/action/`.
5. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the
same directory to see how entries could look. You should list all authors either as `maintainers` or under `ignore`. People
listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests.
When you add a new plugin/module, we expect that you perform maintainer duty for at least some time after contributing it.
-
-## pre-commit
-
-To help ensure high-quality contributions this repository includes a [pre-commit](https://pre-commit.com) configuration which
-corrects and tests against common issues that would otherwise cause CI to fail. To begin using these pre-commit hooks see
-the [Installation](#installation) section below.
-
-This is optional and not required to contribute to this repository.
-
-### Installation
-
-Follow the [instructions](https://pre-commit.com/#install) provided with pre-commit and run `pre-commit install` under the repository base. If for any reason you would like to disable the pre-commit hooks run `pre-commit uninstall`.
-
-This is optional to run it locally.
-
-You can trigger it locally with `pre-commit run --all-files` or even to run only for a given file `pre-commit run --files YOUR_FILE`.
diff --git a/README.md b/README.md
index 14d9ac95eb..726d9cb872 100644
--- a/README.md
+++ b/README.md
@@ -6,8 +6,12 @@ SPDX-License-Identifier: GPL-3.0-or-later
# Community General Collection
-[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
+[](https://docs.ansible.com/ansible/devel/collections/community/general/)
+[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
+[](https://github.com/ansible-collections/community.general/actions)
+[](https://github.com/ansible-collections/community.general/actions)
[](https://codecov.io/gh/ansible-collections/community.general)
+[](https://api.reuse.software/info/github.com/ansible-collections/community.general)
This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections.
@@ -21,11 +25,21 @@ We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/comm
If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint.
+## Communication
+
+* Join the Ansible forum:
+ * [Get Help](https://forum.ansible.com/c/help/6): get help or help others. This is for questions about modules or plugins in the collection. Please add appropriate tags if you start new discussions.
+ * [Tag `community-general`](https://forum.ansible.com/tag/community-general): discuss the *collection itself*, instead of specific modules or plugins.
+ * [Social Spaces](https://forum.ansible.com/c/chat/4): gather and interact with fellow enthusiasts.
+ * [News & Announcements](https://forum.ansible.com/c/news/5): track project-wide announcements including social events.
+
+* The Ansible [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): used to announce releases and important changes.
+
+For more information about communication, see the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).
+
## Tested with Ansible
-Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
-
-Parts of this collection will not work with ansible-core 2.11 on Python 3.12+.
+Tested with the current ansible-core 2.17, ansible-core 2.18, ansible-core 2.19, ansible-core 2.20 releases and the current development version of ansible-core. Ansible-core versions before 2.17.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
## External requirements
@@ -33,13 +47,13 @@ Some modules and plugins require external libraries. Please check the requiremen
## Included content
-Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
+Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/ui/repo/published/community/general/) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
## Using this collection
This collection is shipped with the Ansible package. So if you have it installed, no more action is required.
-If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool:
+If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/ui/repo/published/community/general/) manually with the `ansible-galaxy` command-line tool:
ansible-galaxy collection install community.general
@@ -56,7 +70,7 @@ Note that if you install the collection manually, it will not be upgraded automa
ansible-galaxy collection install community.general --upgrade
```
-You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general):
+You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/ui/repo/published/community/general/):
```bash
ansible-galaxy collection install community.general:==X.Y.Z
@@ -98,25 +112,13 @@ It is necessary for maintainers of this collection to be subscribed to:
They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn).
-## Communication
-
-We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed.
-
-Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat).
-
-We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us.
-
-For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).
-
-For more information about communication, refer to Ansible's the [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).
-
## Publishing New Version
See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection.
## Release notes
-See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-5/CHANGELOG.rst).
+See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.md).
## Roadmap
@@ -139,4 +141,4 @@ See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/commu
Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/PSF-2.0.txt).
-All files have a machine readable `SDPX-License-Identifier:` comment denoting its respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `.reuse/dep5`. This conforms to the [REUSE specification](https://reuse.software/spec/).
+All files have a machine readable `SPDX-License-Identifier:` comment denoting its respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `REUSE.toml`. This conforms to the [REUSE specification](https://reuse.software/spec/).
diff --git a/REUSE.toml b/REUSE.toml
new file mode 100644
index 0000000000..ff95bb8217
--- /dev/null
+++ b/REUSE.toml
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version = 1
+
+[[annotations]]
+path = "changelogs/fragments/**"
+precedence = "aggregate"
+SPDX-FileCopyrightText = "Ansible Project"
+SPDX-License-Identifier = "GPL-3.0-or-later"
diff --git a/antsibull-nox.toml b/antsibull-nox.toml
new file mode 100644
index 0000000000..735d572599
--- /dev/null
+++ b/antsibull-nox.toml
@@ -0,0 +1,99 @@
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+# SPDX-FileCopyrightText: 2025 Felix Fontein
+
+[collection_sources]
+"ansible.posix" = "git+https://github.com/ansible-collections/ansible.posix.git,main"
+"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,main"
+"community.docker" = "git+https://github.com/ansible-collections/community.docker.git,main"
+"community.internal_test_tools" = "git+https://github.com/ansible-collections/community.internal_test_tools.git,main"
+
+[collection_sources_per_ansible.'2.16']
+# community.crypto's main branch needs ansible-core >= 2.17
+"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,stable-2"
+
+[vcs]
+vcs = "git"
+development_branch = "main"
+stable_branches = [ "stable-*" ]
+
+[sessions]
+
+[sessions.lint]
+run_isort = false
+run_black = false
+run_flake8 = false
+run_pylint = false
+run_yamllint = true
+yamllint_config = ".yamllint"
+# yamllint_config_plugins = ".yamllint-docs"
+# yamllint_config_plugins_examples = ".yamllint-examples"
+run_mypy = false
+
+[sessions.docs_check]
+validate_collection_refs="all"
+codeblocks_restrict_types = [
+ "ansible-output",
+ "console",
+ "ini",
+ "json",
+ "python",
+ "shell",
+ "yaml",
+ "yaml+jinja",
+ "text",
+]
+codeblocks_restrict_type_exact_case = true
+codeblocks_allow_without_type = false
+codeblocks_allow_literal_blocks = false
+
+[sessions.license_check]
+
+[sessions.extra_checks]
+run_no_unwanted_files = true
+no_unwanted_files_module_extensions = [".py"]
+no_unwanted_files_yaml_extensions = [".yml"]
+run_action_groups = true
+run_no_trailing_whitespace = true
+no_trailing_whitespace_skip_paths = [
+ "tests/integration/targets/iso_extract/files/test.iso",
+ "tests/integration/targets/java_cert/files/testpkcs.p12",
+ "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz",
+ "tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz",
+ "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz",
+]
+no_trailing_whitespace_skip_directories = [
+ "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/",
+ "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/",
+]
+
+[[sessions.extra_checks.action_groups_config]]
+name = "consul"
+pattern = "^consul_.*$"
+exclusions = [
+ "consul_acl_bootstrap",
+ "consul_kv",
+]
+doc_fragment = "community.general.consul.actiongroup_consul"
+
+[[sessions.extra_checks.action_groups_config]]
+name = "keycloak"
+pattern = "^keycloak_.*$"
+exclusions = [
+ "keycloak_realm_info",
+]
+doc_fragment = "community.general.keycloak.actiongroup_keycloak"
+
+[[sessions.extra_checks.action_groups_config]]
+name = "scaleway"
+pattern = "^scaleway_.*$"
+doc_fragment = "community.general.scaleway.actiongroup_scaleway"
+
+[sessions.build_import_check]
+run_galaxy_importer = true
+
+[sessions.ansible_test_sanity]
+include_devel = true
+
+[sessions.ansible_test_units]
+include_devel = true
diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml
index a631499e37..f8129d5d73 100644
--- a/changelogs/changelog.yaml
+++ b/changelogs/changelog.yaml
@@ -1,1078 +1,3 @@
-ancestor: 4.0.0
-releases:
- 5.0.0:
- changes:
- breaking_changes:
- - lists_mergeby and groupby_as_dict filter plugins - adjust filter plugin filename.
- This change is not visible to end-users, it only affects possible other collections
- importing Python paths (https://github.com/ansible-collections/community.general/pull/4625).
- - yarn - remove unsupported and unnecessary ``--no-emoji`` flag (https://github.com/ansible-collections/community.general/pull/4662).
- bugfixes:
- - consul - fixed bug where class ``ConsulService`` was overwriting the field
- ``checks``, preventing the addition of checks to a service (https://github.com/ansible-collections/community.general/pull/4590).
- - gconftool2 - properly escape values when passing them to ``gconftool-2`` (https://github.com/ansible-collections/community.general/pull/4647).
- - gitlab_hook - avoid errors during idempotency check when an attribute does
- not exist (https://github.com/ansible-collections/community.general/pull/4668).
- - onepassword - search all valid configuration locations and use the first found
- (https://github.com/ansible-collections/community.general/pull/4640).
- - opentelemetry callback plugin - fix task message attribute that is reported
- failed regardless of the task result (https://github.com/ansible-collections/community.general/pull/4624).
- - opentelemetry callback plugin - fix warning for the include_tasks (https://github.com/ansible-collections/community.general/pull/4623).
- - redfish_command - the iLO4 Redfish implementation only supports the ``image_url``
- parameter in the underlying API calls to ``VirtualMediaInsert`` and ``VirtualMediaEject``.
- Any values set (or the defaults) for ``write_protected`` or ``inserted`` will
- be ignored (https://github.com/ansible-collections/community.general/pull/4596).
- - terraform - fix list initialization to support both Python 2 and Python 3
- (https://github.com/ansible-collections/community.general/issues/4531).
- - zypper - fix undefined variable when running in check mode (https://github.com/ansible-collections/community.general/pull/4667).
- deprecated_features:
- - ansible_galaxy_install - deprecated support for ``ansible`` 2.9 and ``ansible-base``
- 2.10 (https://github.com/ansible-collections/community.general/pull/4601).
- - dig lookup plugin - the ``DLV`` record type has been decommissioned in 2017
- and support for it will be removed from community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4618).
- minor_changes:
- - ModuleHelper module utils - ``ModuleHelperBase` now delegates the attributes
- ``check_mode``, ``get_bin_path``, ``warn``, and ``deprecate`` to the underlying
- ``AnsibleModule`` instance (https://github.com/ansible-collections/community.general/pull/4600).
- - ModuleHelper module utils - ``ModuleHelperBase`` now has a convenience method
- ``do_raise`` (https://github.com/ansible-collections/community.general/pull/4660).
- - ipa_dnsrecord - add new argument ``record_values``, mutually exclusive to
- ``record_value``, which supports multiple values for one record (https://github.com/ansible-collections/community.general/pull/4578).
- - pritunl_user - add ``mac_addresses`` parameter (https://github.com/ansible-collections/community.general/pull/4535).
- - rax_files_objects - minor refactoring improving code quality (https://github.com/ansible-collections/community.general/pull/4649).
- - redfish_* modules - the contents of ``@Message.ExtendedInfo`` will be returned
- as a string in the event that ``@Message.ExtendedInfo.Messages`` does not
- exist. This is likely more useful than the standard HTTP error (https://github.com/ansible-collections/community.general/pull/4596).
- - to_time_unit filter plugins - the time filters has been extended to also allow
- ``0`` as input (https://github.com/ansible-collections/community.general/pull/4612).
- - vmadm - minor refactoring and improvement on the module (https://github.com/ansible-collections/community.general/pull/4581).
- - vmadm - minor refactoring and improvement on the module (https://github.com/ansible-collections/community.general/pull/4648).
- - zfs - minor refactoring in the code (https://github.com/ansible-collections/community.general/pull/4650).
- release_summary: This is release 5.0.0 of ``community.general``, released on
- 2022-05-17.
- fragments:
- - 4065-onepassword-config.yml
- - 4535-pritunl-add-mac_addresses-parameter.yml
- - 4578-ipa_dnsrecord-add_multiple_record_support.yml
- - 4581-vmadm-improvements.yaml
- - 4590-consul-fix-service-checks.yaml
- - 4595-fix-VirtualMediaInsert-iLO4.yml
- - 4600-mh-delegate.yaml
- - 4601-ansible-galaxy-install-deprecate-ansible29-and-210.yaml
- - 4612-time_filter_zero.yml
- - 4618-dig-dlv.yml
- - 4621-terraform-py2-compat.yml
- - 4623-opentelemetry_bug_fix_include_tasks.yml
- - 4624-opentelemetry_bug_fix_hardcoded_value.yml
- - 4625-fix-filter-filenames.yml
- - 4647-gconftool2-command-arg.yaml
- - 4648-vmadm-improvements-2.yaml
- - 4649-rax-files-objects-improvements.yaml
- - 4650-zfs-improvements.yaml
- - 4651-zypper-checkmode-fix.yaml
- - 4660-mh-added-do-raise.yaml
- - 4662-yarn-emoji.yml
- - 4668-gitlab_hook-use-None-for-non-existent-attr.yml
- - 5.0.0.yml
- release_date: '2022-05-17'
- 5.0.0-a1:
- changes:
- breaking_changes:
- - Parts of this collection do not work with ansible-core 2.11 on Python 3.12+.
- Please either upgrade to ansible-core 2.12+, or use Python 3.11 or earlier
- (https://github.com/ansible-collections/community.general/pull/3988).
- - The symbolic links used to implement flatmapping for all modules were removed
- and replaced by ``meta/runtime.yml`` redirects. This effectively breaks compatibility
- with Ansible 2.9 for all modules (without using their "long" names, which
- is discouraged and which can change without previous notice since they are
- considered an implementation detail) (https://github.com/ansible-collections/community.general/pull/4548).
- - a_module test plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
- - archive - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
- - git_config - remove Ansible 2.9 and early ansible-base 2.10 compatibility
- code (https://github.com/ansible-collections/community.general/pull/4548).
- - java_keystore - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
- - lists_mergeby filter plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
- - maven_artifact - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
- - memcached cache plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
- - path_join filter plugin shim - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
- - redis cache plugin - remove Ansible 2.9 compatibility code (https://github.com/ansible-collections/community.general/pull/4548).
- bugfixes:
- - Various modules and plugins - use vendored version of ``distutils.version``
- instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.general/pull/3936).
- - a_module test plugin - fix crash when testing a module name that was tombstoned
- (https://github.com/ansible-collections/community.general/pull/3660).
- - alternatives - fix output parsing for alternatives groups (https://github.com/ansible-collections/community.general/pull/3976).
- - cargo - fix detection of outdated packages when ``state=latest`` (https://github.com/ansible-collections/community.general/pull/4052).
- - cargo - fix incorrectly reported changed status for packages with a name containing
- a hyphen (https://github.com/ansible-collections/community.general/issues/4044,
- https://github.com/ansible-collections/community.general/pull/4052).
- - counter_enabled callback plugin - fix output to correctly display host and
- task counters in serial mode (https://github.com/ansible-collections/community.general/pull/3709).
- - dconf - skip processes that disappeared while we inspected them (https://github.com/ansible-collections/community.general/issues/4151).
- - dnsmadeeasy - fix failure on deleting DNS entries when API response does not
- contain monitor value (https://github.com/ansible-collections/community.general/issues/3620).
- - dsv lookup plugin - raise an Ansible error if the wrong ``python-dsv-sdk``
- version is installed (https://github.com/ansible-collections/community.general/pull/4422).
- - filesize - add support for busybox dd implementation, that is used by default
- on Alpine linux (https://github.com/ansible-collections/community.general/pull/4288,
- https://github.com/ansible-collections/community.general/issues/4259).
- - git_branch - remove deprecated and unnecessary branch ``unprotect`` method
- (https://github.com/ansible-collections/community.general/pull/4496).
- - github_repo - ``private`` and ``description`` attributes should not be set
- to default values when the repo already exists (https://github.com/ansible-collections/community.general/pull/2386).
- - 'gitlab_group - improve searching for projects inside group on deletion (https://github.com/ansible-collections/community.general/pull/4491).
-
- '
- - 'gitlab_group_members - handle more than 20 groups when finding a group (https://github.com/ansible-collections/community.general/pull/4491,
- https://github.com/ansible-collections/community.general/issues/4460, https://github.com/ansible-collections/community.general/issues/3729).
-
- '
- - gitlab_group_variable - add missing documentation about GitLab versions that
- support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/pull/4038).
- - 'gitlab_group_variable - allow to set same variable name under different environment
- scopes. Due this change, the return value ``group_variable`` differs from
- previous version in check mode. It was counting ``updated`` values, because
- it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/pull/4038).
-
- '
- - gitlab_group_variable - fix idempotent change behaviour for float and integer
- variables (https://github.com/ansible-collections/community.general/pull/4038).
- - 'gitlab_hook - handle more than 20 hooks when finding a hook (https://github.com/ansible-collections/community.general/pull/4491).
-
- '
- - 'gitlab_project - handle more than 20 namespaces when finding a namespace
- (https://github.com/ansible-collections/community.general/pull/4491).
-
- '
- - 'gitlab_project_members - handle more than 20 projects and users when finding
- a project resp. user (https://github.com/ansible-collections/community.general/pull/4491).
-
- '
- - gitlab_project_variable - ``value`` is not necessary when deleting variables
- (https://github.com/ansible-collections/community.general/pull/4150).
- - gitlab_project_variable - add missing documentation about GitLab versions
- that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/issues/4038).
- - 'gitlab_project_variable - allow to set same variable name under different
- environment scopes. Due this change, the return value ``project_variable``
- differs from previous version in check mode. It was counting ``updated`` values,
- because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/issues/4038).
-
- '
- - gitlab_project_variable - fix idempotent change behaviour for float and integer
- variables (https://github.com/ansible-collections/community.general/issues/4038).
- - gitlab_runner - make ``project`` and ``owned`` mutually exclusive (https://github.com/ansible-collections/community.general/pull/4136).
- - gitlab_runner - use correct API endpoint to create and retrieve project level
- runners when using ``project`` (https://github.com/ansible-collections/community.general/pull/3965).
- - 'gitlab_user - handle more than 20 users and SSH keys when finding a user
- resp. SSH key (https://github.com/ansible-collections/community.general/pull/4491).
-
- '
- - homebrew_cask - fix force install operation (https://github.com/ansible-collections/community.general/issues/3703).
- - icinga2 inventory plugin - handle 404 error when filter produces no results
- (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
- - imc_rest - fixes the module failure due to the usage of ``itertools.izip_longest``
- which is not available in Python 3 (https://github.com/ansible-collections/community.general/issues/4206).
- - ini_file - when removing nothing do not report changed (https://github.com/ansible-collections/community.general/issues/4154).
- - interfaces_file - fixed the check for existing option in interface (https://github.com/ansible-collections/community.general/issues/3841).
- - jail connection plugin - replace deprecated ``distutils.spawn.find_executable``
- with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
- - jira - fixed bug where module returns error related to dictionary key ``body``
- (https://github.com/ansible-collections/community.general/issues/3419).
- - keycloak - fix parameters types for ``defaultDefaultClientScopes`` and ``defaultOptionalClientScopes``
- from list of dictionaries to list of strings (https://github.com/ansible-collections/community.general/pull/4526).
- - keycloak_* - the documented ``validate_certs`` parameter was not taken into
- account when calling the ``open_url`` function in some cases, thus enforcing
- certificate validation even when ``validate_certs`` was set to ``false``.
- (https://github.com/ansible-collections/community.general/pull/4382)
- - keycloak_user_federation - creating a user federation while specifying an
- ID (that does not exist yet) no longer fail with a 404 Not Found (https://github.com/ansible-collections/community.general/pull/4212).
- - keycloak_user_federation - mappers auto-created by keycloak are matched and
- merged by their name and no longer create duplicated entries (https://github.com/ansible-collections/community.general/pull/4212).
- - ldap_search - allow it to be used even in check mode (https://github.com/ansible-collections/community.general/issues/3619).
- - linode inventory plugin - fix configuration handling relating to inventory
- filtering (https://github.com/ansible-collections/community.general/pull/4336).
- - listen_ports_facts - local port regex was not handling well IPv6 only binding.
- Fixes the regex for ``ss`` (https://github.com/ansible-collections/community.general/pull/4092).
- - lvol - allows logical volumes to be created with certain size arguments prefixed
- with ``+`` to preserve behavior of older versions of this module (https://github.com/ansible-collections/community.general/issues/3665).
- - lxd connection plugin - replace deprecated ``distutils.spawn.find_executable``
- with Ansible's ``get_bin_path`` to find the ``lxc`` executable (https://github.com/ansible-collections/community.general/pull/3934).
- - 'lxd inventory plugin - do not crash if OS and release metadata are not present
-
- (https://github.com/ansible-collections/community.general/pull/4351).
-
- '
- - mail callback plugin - fix crash on Python 3 (https://github.com/ansible-collections/community.general/issues/4025,
- https://github.com/ansible-collections/community.general/pull/4026).
- - mail callback plugin - fix encoding of the name of sender and recipient (https://github.com/ansible-collections/community.general/issues/4060,
- https://github.com/ansible-collections/community.general/pull/4061).
- - mksysb - fixed bug for parameter ``backup_dmapi_fs`` was passing the wrong
- CLI argument (https://github.com/ansible-collections/community.general/pull/3295).
- - nmcli - fix returning "changed" when no mask set for IPv4 or IPv6 addresses
- on task rerun (https://github.com/ansible-collections/community.general/issues/3768).
- - nmcli - fix returning "changed" when routes parameters set, also suggest new
- routes4 and routes6 format (https://github.com/ansible-collections/community.general/issues/4131).
- - nmcli - fixed falsely reported changed status when ``mtu`` is omitted with
- ``dummy`` connections (https://github.com/ansible-collections/community.general/issues/3612,
- https://github.com/ansible-collections/community.general/pull/3625).
- - nmcli - pass ``flags``, ``ingress``, ``egress`` params to ``nmcli`` (https://github.com/ansible-collections/community.general/issues/1086).
- - nrdp callback plugin - fix error ``string arguments without an encoding``
- (https://github.com/ansible-collections/community.general/issues/3903).
- - opennebula inventory plugin - complete the implementation of ``constructable``
- for opennebula inventory plugin. Now ``keyed_groups``, ``compose``, ``groups``
- actually work (https://github.com/ansible-collections/community.general/issues/4497).
- - 'opentelemetry - fix generating a trace with a task containing ``no_log: true``
- (https://github.com/ansible-collections/community.general/pull/4043).'
- - opentelemetry_plugin - honour ``ignore_errors`` when a task has failed instead
- of reporting an error (https://github.com/ansible-collections/community.general/pull/3837).
- - pacman - Use ``--groups`` instead of ``--group`` (https://github.com/ansible-collections/community.general/pull/4312).
- - pacman - fix URL based package installation (https://github.com/ansible-collections/community.general/pull/4286,
- https://github.com/ansible-collections/community.general/issues/4285).
- - pacman - fix ``upgrade=yes`` (https://github.com/ansible-collections/community.general/pull/4275,
- https://github.com/ansible-collections/community.general/issues/4274).
- - pacman - fixed bug where ``absent`` state did not work for locally installed
- packages (https://github.com/ansible-collections/community.general/pull/4464).
- - pacman - make sure that ``packages`` is always returned when ``name`` or ``upgrade``
- is specified, also if nothing is done (https://github.com/ansible-collections/community.general/pull/4329).
- - pacman - when the ``update_cache`` option is combined with another option
- such as ``upgrade``, report ``changed`` based on the actions performed by
- the latter option. This was the behavior in community.general 4.4.0 and before.
- In community.general 4.5.0, a task combining these options would always report
- ``changed`` (https://github.com/ansible-collections/community.general/pull/4318).
- - passwordstore lookup plugin - fix error detection for non-English locales
- (https://github.com/ansible-collections/community.general/pull/4219).
- - passwordstore lookup plugin - prevent returning path names as passwords by
- accident (https://github.com/ansible-collections/community.general/issues/4185,
- https://github.com/ansible-collections/community.general/pull/4192).
- - passwordstore lookup plugin - replace deprecated ``distutils.util.strtobool``
- with Ansible's ``convert_bool.boolean`` to interpret values for the ``create``,
- ``returnall``, ``overwrite``, 'backup``, and ``nosymbols`` options (https://github.com/ansible-collections/community.general/pull/3934).
- - pipx - passes the correct command line option ``--include-apps`` (https://github.com/ansible-collections/community.general/issues/3791).
- - pritunl - fixed bug where pritunl plugin api add unneeded data in ``auth_string``
- parameter (https://github.com/ansible-collections/community.general/issues/4527).
- - proxmox - fixed ``onboot`` parameter causing module failures when undefined
- (https://github.com/ansible-collections/community.general/issues/3844).
- - proxmox inventory plugin - always convert strings that follow the ``key=value[,key=value[...]]``
- form into dictionaries (https://github.com/ansible-collections/community.general/pull/4349).
- - proxmox inventory plugin - fix error when parsing container with LXC configs
- (https://github.com/ansible-collections/community.general/issues/4472, https://github.com/ansible-collections/community.general/pull/4472).
- - proxmox inventory plugin - fixed the ``description`` field being ignored if
- it contained a comma (https://github.com/ansible-collections/community.general/issues/4348).
- - proxmox inventory plugin - fixed the ``tags_parsed`` field when Proxmox returns
- a single space for the ``tags`` entry (https://github.com/ansible-collections/community.general/pull/4378).
- - proxmox_kvm - fix a bug when getting a state of VM without name will fail
- (https://github.com/ansible-collections/community.general/pull/4508).
- - proxmox_kvm - fix error in check when creating or cloning (https://github.com/ansible-collections/community.general/pull/4306).
- - proxmox_kvm - fix error when checking whether Proxmox VM exists (https://github.com/ansible-collections/community.general/pull/4287).
- - python_requirements_info - fails if version operator used without version
- (https://github.com/ansible-collections/community.general/pull/3785).
- - python_requirements_info - store ``mismatched`` return values per package
- as documented in the module (https://github.com/ansible-collections/community.general/pull/4078).
- - say callback plugin - replace deprecated ``distutils.spawn.find_executable``
- with Ansible's ``get_bin_path`` to find the ``say`` resp. ``espeak`` executables
- (https://github.com/ansible-collections/community.general/pull/3934).
- - scaleway_user_data - fix double-quote added where no double-quote is needed
- to user data in scaleway's server (``Content-type`` -> ``Content-Type``) (https://github.com/ansible-collections/community.general/pull/3940).
- - slack - add ``charset`` to HTTP headers to avoid Slack API warning (https://github.com/ansible-collections/community.general/issues/3932).
- - terraform - fix command options being ignored during planned/plan in function
- ``build_plan`` such as ``lock`` or ``lock_timeout`` (https://github.com/ansible-collections/community.general/issues/3707,
- https://github.com/ansible-collections/community.general/pull/3726).
- - vdo - fix options error (https://github.com/ansible-collections/community.general/pull/4163).
- - xattr - fix exception caused by ``_run_xattr()`` raising a ``ValueError``
- due to a mishandling of base64-encoded value (https://github.com/ansible-collections/community.general/issues/3673).
- - xbps - fix error message that is reported when installing packages fails (https://github.com/ansible-collections/community.general/pull/4438).
- - yarn - fix incorrect handling of ``yarn list`` and ``yarn global list`` output
- that could result in fatal error (https://github.com/ansible-collections/community.general/pull/4050).
- - yarn - fix incorrectly reported status when installing a package globally
- (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4050).
- - yarn - fix missing ``~`` expansion in yarn global install folder which resulted
- in incorrect task status (https://github.com/ansible-collections/community.general/issues/4045,
- https://github.com/ansible-collections/community.general/pull/4048).
- - yum_versionlock - fix matching of existing entries with names passed to the
- module. Match yum and dnf lock format (https://github.com/ansible-collections/community.general/pull/4183).
- - zone connection plugin - replace deprecated ``distutils.spawn.find_executable``
- with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
- - zypper - fixed bug that caused zypper to always report [ok] and do nothing
- on ``state=present`` when all packages in ``name`` had a version specification
- (https://github.com/ansible-collections/community.general/issues/4371, https://github.com/ansible-collections/community.general/pull/4421).
- deprecated_features:
- - gem - the default of the ``norc`` option has been deprecated and will change
- to ``true`` in community.general 6.0.0. Explicitly specify a value to avoid
- a deprecation warning (https://github.com/ansible-collections/community.general/pull/4517).
- - mail callback plugin - not specifying ``sender`` is deprecated and will be
- disallowed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4140).
- - module_helper module utils - deprecated the attribute ``ModuleHelper.VarDict``
- (https://github.com/ansible-collections/community.general/pull/3801).
- - nmcli - deprecate default hairpin mode for a bridge. This so we can change
- it to ``false`` in community.general 7.0.0, as this is also the default in
- ``nmcli`` (https://github.com/ansible-collections/community.general/pull/4334).
- - 'pacman - from community.general 5.0.0 on, the ``changed`` status of ``update_cache``
- will no longer be ignored if ``name`` or ``upgrade`` is specified. To keep
- the old behavior, add something like ``register: result`` and ``changed_when:
- result.packages | length > 0`` to your task (https://github.com/ansible-collections/community.general/pull/4329).'
- - proxmox inventory plugin - the current default ``true`` of the ``want_proxmox_nodes_ansible_host``
- option has been deprecated. The default will change to ``false`` in community.general
- 6.0.0. To keep the current behavior, explicitly set ``want_proxmox_nodes_ansible_host``
- to ``true`` in your inventory configuration. We suggest to already switch
- to the new behavior by explicitly setting it to ``false``, and by using ``compose:``
- to set ``ansible_host`` to the correct value. See the examples in the plugin
- documentation for details (https://github.com/ansible-collections/community.general/pull/4466).
- - vmadm - deprecated module parameter ``debug`` that was not used anywhere (https://github.com/ansible-collections/community.general/pull/4580).
- known_issues:
- - pacman - ``update_cache`` cannot differentiate between up to date and outdated
- package lists and will report ``changed`` in both situations (https://github.com/ansible-collections/community.general/pull/4318).
- - pacman - binaries specified in the ``executable`` parameter must support ``--print-format``
- in order to be used by this module. In particular, AUR helper ``yay`` is known
- not to currently support it (https://github.com/ansible-collections/community.general/pull/4312).
- major_changes:
- - The community.general collection no longer supports Ansible 2.9 and ansible-base
- 2.10. While we take no active measures to prevent usage, we will remove a
- lot of compatibility code and other compatility measures that will effectively
- prevent using most content from this collection with Ansible 2.9, and some
- content of this collection with ansible-base 2.10. Both Ansible 2.9 and ansible-base
- 2.10 will very soon be End of Life and if you are still using them, you should
- consider upgrading to ansible-core 2.11 or later as soon as possible (https://github.com/ansible-collections/community.general/pull/4548).
- minor_changes:
- - Avoid internal ansible-core module_utils in favor of equivalent public API
- available since at least Ansible 2.9. This fixes some instances added since
- the last time this was fixed (https://github.com/ansible-collections/community.general/pull/4232).
- - Remove vendored copy of ``distutils.version`` in favor of vendored copy included
- with ansible-core 2.12+. For ansible-core 2.11, uses ``distutils.version``
- for Python < 3.12. There is no support for ansible-core 2.11 with Python 3.12+
- (https://github.com/ansible-collections/community.general/pull/3988).
- - aix_filesystem - calling ``run_command`` with arguments as ``list`` instead
- of ``str`` (https://github.com/ansible-collections/community.general/pull/3833).
- - aix_lvg - calling ``run_command`` with arguments as ``list`` instead of ``str``
- (https://github.com/ansible-collections/community.general/pull/3834).
- - alternatives - add ``state`` parameter, which provides control over whether
- the alternative should be set as the active selection for its alternatives
- group (https://github.com/ansible-collections/community.general/issues/4543,
- https://github.com/ansible-collections/community.general/pull/4557).
- - ansible_galaxy_install - added option ``no_deps`` to the module (https://github.com/ansible-collections/community.general/issues/4174).
- - atomic_container - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - clc_alert_policy - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
- - clc_group - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
- - clc_loadbalancer - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
- - clc_server - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
- - cmd_runner module util - reusable command runner with consistent argument
- formatting and sensible defaults (https://github.com/ansible-collections/community.general/pull/4476).
- - cobbler inventory plugin - add ``include_profiles`` option (https://github.com/ansible-collections/community.general/pull/4068).
- - datadog_monitor - support new datadog event monitor of type `event-v2 alert`
- (https://github.com/ansible-collections/community.general/pull/4457)
- - filesystem - add support for resizing btrfs (https://github.com/ansible-collections/community.general/issues/4465).
- - gitlab - add more token authentication support with the new options ``api_oauth_token``
- and ``api_job_token`` (https://github.com/ansible-collections/community.general/issues/705).
- - gitlab - clean up modules and utils (https://github.com/ansible-collections/community.general/pull/3694).
- - gitlab_group, gitlab_project - add new option ``avatar_path`` (https://github.com/ansible-collections/community.general/pull/3792).
- - gitlab_group_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/pull/4038
- and https://github.com/ansible-collections/community.general/issues/4074).
- - gitlab_project - add new option ``default_branch`` to gitlab_project (if ``readme
- = true``) (https://github.com/ansible-collections/community.general/pull/3792).
- - gitlab_project_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/issues/4038).
- - hponcfg - revamped module using ModuleHelper (https://github.com/ansible-collections/community.general/pull/3840).
- - icinga2 inventory plugin - added the ``display_name`` field to variables (https://github.com/ansible-collections/community.general/issues/3875,
- https://github.com/ansible-collections/community.general/pull/3906).
- - icinga2 inventory plugin - implemented constructed interface (https://github.com/ansible-collections/community.general/pull/4088).
- - icinga2 inventory plugin - inventory object names are changable using ``inventory_attr``
- in your config file to the host object name, address, or display_name fields
- (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
- - ip_netns - calling ``run_command`` with arguments as ``list`` instead of ``str``
- (https://github.com/ansible-collections/community.general/pull/3822).
- - ipa_dnszone - ``dynamicupdate`` is now a boolean parameter, instead of a string
- parameter accepting ``"true"`` and ``"false"``. Also the module is now idempotent
- with respect to ``dynamicupdate`` (https://github.com/ansible-collections/community.general/pull/3374).
- - ipa_dnszone - add DNS zone synchronization support (https://github.com/ansible-collections/community.general/pull/3374).
- - ipa_service - add ``skip_host_check`` parameter. (https://github.com/ansible-collections/community.general/pull/4417).
- - ipmi_boot - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
- - ipmi_power - add ``machine`` option to ensure the power state via the remote
- target address (https://github.com/ansible-collections/community.general/pull/3968).
- - ipmi_power - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
- - iso_extract - calling ``run_command`` with arguments as ``list`` instead of
- ``str`` (https://github.com/ansible-collections/community.general/pull/3805).
- - java_cert - calling ``run_command`` with arguments as ``list`` instead of
- ``str`` (https://github.com/ansible-collections/community.general/pull/3835).
- - jira - add support for Bearer token auth (https://github.com/ansible-collections/community.general/pull/3838).
- - jira - when creating a comment, ``fields`` now is used for additional data
- (https://github.com/ansible-collections/community.general/pull/4304).
- - keycloak_* modules - added connection timeout parameter when calling server
- (https://github.com/ansible-collections/community.general/pull/4168).
- - keycloak_client - add ``always_display_in_console`` parameter (https://github.com/ansible-collections/community.general/issues/4390).
- - keycloak_client - add ``default_client_scopes`` and ``optional_client_scopes``
- parameters. (https://github.com/ansible-collections/community.general/pull/4385).
- - keycloak_user_federation - add sssd user federation support (https://github.com/ansible-collections/community.general/issues/3767).
- - ldap_entry - add support for recursive deletion (https://github.com/ansible-collections/community.general/issues/3613).
- - linode inventory plugin - add support for caching inventory results (https://github.com/ansible-collections/community.general/pull/4179).
- - linode inventory plugin - allow templating of ``access_token`` variable in
- Linode inventory plugin (https://github.com/ansible-collections/community.general/pull/4040).
- - listen_ports_facts - add support for ``ss`` command besides ``netstat`` (https://github.com/ansible-collections/community.general/pull/3708).
- - lists_mergeby filter plugin - add parameters ``list_merge`` and ``recursive``.
- These are only supported when used with ansible-base 2.10 or ansible-core,
- but not with Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/4058).
- - logentries - calling ``run_command`` with arguments as ``list`` instead of
- ``str`` (https://github.com/ansible-collections/community.general/pull/3807).
- - logstash_plugin - calling ``run_command`` with arguments as ``list`` instead
- of ``str`` (https://github.com/ansible-collections/community.general/pull/3808).
- - lxc_container - added ``wait_for_container`` parameter. If ``true`` the module
- will wait until the running task reports success as the status (https://github.com/ansible-collections/community.general/pull/4039).
- - lxc_container - calling ``run_command`` with arguments as ``list`` instead
- of ``str`` (https://github.com/ansible-collections/community.general/pull/3851).
- - lxd connection plugin - make sure that ``ansible_lxd_host``, ``ansible_executable``,
- and ``ansible_lxd_executable`` work (https://github.com/ansible-collections/community.general/pull/3798).
- - lxd inventory plugin - support virtual machines (https://github.com/ansible-collections/community.general/pull/3519).
- - lxd_container - adds ``project`` option to allow selecting project for LXD
- instance (https://github.com/ansible-collections/community.general/pull/4479).
- - lxd_container - adds ``type`` option which also allows to operate on virtual
- machines and not just containers (https://github.com/ansible-collections/community.general/pull/3661).
- - lxd_profile - adds ``project`` option to allow selecting project for LXD profile
- (https://github.com/ansible-collections/community.general/pull/4479).
- - mail callback plugin - add ``Message-ID`` and ``Date`` headers (https://github.com/ansible-collections/community.general/issues/4055,
- https://github.com/ansible-collections/community.general/pull/4056).
- - mail callback plugin - properly use Ansible's option handling to split lists
- (https://github.com/ansible-collections/community.general/pull/4140).
- - mattermost - add the possibility to send attachments instead of text messages
- (https://github.com/ansible-collections/community.general/pull/3946).
- - mksysb - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3295).
- - module_helper module utils - added decorators ``check_mode_skip`` and ``check_mode_skip_returns``
- for skipping methods when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/3849).
- - monit - calling ``run_command`` with arguments as ``list`` instead of ``str``
- (https://github.com/ansible-collections/community.general/pull/3821).
- - nmap inventory plugin - add ``sudo`` option in plugin in order to execute
- ``sudo nmap`` so that ``nmap`` runs with elevated privileges (https://github.com/ansible-collections/community.general/pull/4506).
- - nmcli - add ``wireguard`` connection type (https://github.com/ansible-collections/community.general/pull/3985).
- - nmcli - add missing connection aliases ``802-3-ethernet`` and ``802-11-wireless``
- (https://github.com/ansible-collections/community.general/pull/4108).
- - nmcli - add multiple addresses support for ``ip4`` parameter (https://github.com/ansible-collections/community.general/issues/1088,
- https://github.com/ansible-collections/community.general/pull/3738).
- - nmcli - add multiple addresses support for ``ip6`` parameter (https://github.com/ansible-collections/community.general/issues/1088).
- - nmcli - add support for ``eui64`` and ``ipv6privacy`` parameters (https://github.com/ansible-collections/community.general/issues/3357).
- - nmcli - adds ``routes6`` and ``route_metric6`` parameters for supporting IPv6
- routes (https://github.com/ansible-collections/community.general/issues/4059).
- - nmcli - remove nmcli modify dependency on ``type`` parameter (https://github.com/ansible-collections/community.general/issues/2858).
- - nomad_job - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - nomad_job_info - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - npm - add ability to use ``production`` flag when ``ci`` is set (https://github.com/ansible-collections/community.general/pull/4299).
- - open_iscsi - extended module to allow rescanning of established session for
- one or all targets (https://github.com/ansible-collections/community.general/issues/3763).
- - opennebula - add the release action for VMs in the ``HOLD`` state (https://github.com/ansible-collections/community.general/pull/4036).
- - opentelemetry_plugin - enrich service when using the ``docker_login`` (https://github.com/ansible-collections/community.general/pull/4104).
- - opentelemetry_plugin - enrich service when using the ``jenkins``, ``hetzner``
- or ``jira`` modules (https://github.com/ansible-collections/community.general/pull/4105).
- - packet_device - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - packet_sshkey - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - packet_volume - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - pacman - add ``remove_nosave`` parameter to avoid saving modified configuration
- files as ``.pacsave`` files. (https://github.com/ansible-collections/community.general/pull/4316,
- https://github.com/ansible-collections/community.general/issues/4315).
- - pacman - add ``stdout`` and ``stderr`` as return values (https://github.com/ansible-collections/community.general/pull/3758).
- - pacman - now implements proper change detection for ``update_cache=true``.
- Adds ``cache_updated`` return value to when ``update_cache=true`` to report
- this result independently of the module's overall changed return value (https://github.com/ansible-collections/community.general/pull/4337).
- - pacman - the module has been rewritten and is now much faster when using ``state=latest``.
- Operations are now done all packages at once instead of package per package
- and the configured output format of ``pacman`` no longer affect the module's
- operation. (https://github.com/ansible-collections/community.general/pull/3907,
- https://github.com/ansible-collections/community.general/issues/3783, https://github.com/ansible-collections/community.general/issues/4079)
- - passwordstore lookup plugin - add configurable ``lock`` and ``locktimeout``
- options to avoid race conditions in itself and in the ``pass`` utility it
- calls. By default, the plugin now locks on write operations (https://github.com/ansible-collections/community.general/pull/4194).
- - pipx - added options ``editable`` and ``pip_args`` (https://github.com/ansible-collections/community.general/issues/4300).
- - profitbricks - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - proxmox - add ``clone`` parameter (https://github.com/ansible-collections/community.general/pull/3930).
- - proxmox - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - proxmox inventory plugin - add support for client-side jinja filters (https://github.com/ansible-collections/community.general/issues/3553).
- - proxmox inventory plugin - add support for templating the ``url``, ``user``,
- and ``password`` options (https://github.com/ansible-collections/community.general/pull/4418).
- - proxmox inventory plugin - add token authentication as an alternative to username/password
- (https://github.com/ansible-collections/community.general/pull/4540).
- - proxmox inventory plugin - parse LXC configs returned by the proxmox API (https://github.com/ansible-collections/community.general/pull/4472).
- - proxmox modules - move ``HAS_PROXMOXER`` check into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4030).
- - proxmox modules - move common code into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4029).
- - proxmox_kvm - added EFI disk support when creating VM with OVMF UEFI BIOS
- with new ``efidisk0`` option (https://github.com/ansible-collections/community.general/pull/4106,
- https://github.com/ansible-collections/community.general/issues/1638).
- - proxmox_kwm - add ``win11`` to ``ostype`` parameter for Windows 11 and Windows
- Server 2022 support (https://github.com/ansible-collections/community.general/issues/4023,
- https://github.com/ansible-collections/community.general/pull/4191).
- - proxmox_snap - add restore snapshot option (https://github.com/ansible-collections/community.general/pull/4377).
- - proxmox_snap - fixed timeout value to correctly reflect time in seconds. The
- timeout was off by one second (https://github.com/ansible-collections/community.general/pull/4377).
- - puppet - remove deprecation for ``show_diff`` parameter. Its alias ``show-diff``
- is still deprecated and will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/3980).
- - python_requirements_info - returns python version broken down into its components,
- and some minor refactoring (https://github.com/ansible-collections/community.general/pull/3797).
- - redfish_command - add ``GetHostInterfaces`` command to enable reporting Redfish
- Host Interface information (https://github.com/ansible-collections/community.general/issues/3693).
- - redfish_command - add ``IndicatorLedOn``, ``IndicatorLedOff``, and ``IndicatorLedBlink``
- commands to the Systems category for controling system LEDs (https://github.com/ansible-collections/community.general/issues/4084).
- - redfish_command - add ``SetHostInterface`` command to enable configuring the
- Redfish Host Interface (https://github.com/ansible-collections/community.general/issues/3632).
- - redis - add authentication parameters ``login_user``, ``tls``, ``validate_certs``,
- and ``ca_certs`` (https://github.com/ansible-collections/community.general/pull/4207).
- - scaleway inventory plugin - add profile parameter ``scw_profile`` (https://github.com/ansible-collections/community.general/pull/4049).
- - scaleway_compute - add possibility to use project identifier (new ``project``
- option) instead of deprecated organization identifier (https://github.com/ansible-collections/community.general/pull/3951).
- - scaleway_volume - all volumes are systematically created on par1 (https://github.com/ansible-collections/community.general/pull/3964).
- - seport - minor refactoring (https://github.com/ansible-collections/community.general/pull/4471).
- - smartos_image_info - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - snap - add option ``options`` permitting to set options using the ``snap set``
- command (https://github.com/ansible-collections/community.general/pull/3943).
- - sudoers - add support for ``runas`` parameter (https://github.com/ansible-collections/community.general/issues/4379).
- - svc - calling ``run_command`` with arguments as ``list`` instead of ``str``
- (https://github.com/ansible-collections/community.general/pull/3829).
- - syslog_json - add option to skip logging of ``gather_facts`` playbook tasks;
- use v2 callback API (https://github.com/ansible-collections/community.general/pull/4223).
- - terraform - adds ``terraform_upgrade`` parameter which allows ``terraform
- init`` to satisfy new provider constraints in an existing Terraform project
- (https://github.com/ansible-collections/community.general/issues/4333).
- - udm_group - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
- - udm_share - minor refactoring (https://github.com/ansible-collections/community.general/pull/4556).
- - vmadm - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - webfaction_app - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - webfaction_db - minor refactoring (https://github.com/ansible-collections/community.general/pull/4567).
- - xattr - calling ``run_command`` with arguments as ``list`` instead of ``str``
- (https://github.com/ansible-collections/community.general/pull/3806).
- - xfconf - added missing value types ``char``, ``uchar``, ``int64`` and ``uint64``
- (https://github.com/ansible-collections/community.general/pull/4534).
- - xfconf - minor refactor on the base class for the module (https://github.com/ansible-collections/community.general/pull/3919).
- - zypper - add support for ``--clean-deps`` option to remove packages that depend
- on a package being removed (https://github.com/ansible-collections/community.general/pull/4195).
- release_summary: Alpha release for community.general 5.0.0.
- removed_features:
- - ali_instance_info - removed the options ``availability_zone``, ``instance_ids``,
- and ``instance_names``. Use filter item ``zone_id`` instead of ``availability_zone``,
- filter item ``instance_ids`` instead of ``instance_ids``, and filter item
- ``instance_name`` instead of ``instance_names`` (https://github.com/ansible-collections/community.general/pull/4516).
- - apt_rpm - removed the deprecated alias ``update-cache`` of ``update_cache``
- (https://github.com/ansible-collections/community.general/pull/4516).
- - compose - removed various deprecated aliases. Use the version with ``_`` instead
- of ``-`` instead (https://github.com/ansible-collections/community.general/pull/4516).
- - dnsimple - remove support for dnsimple < 2.0.0 (https://github.com/ansible-collections/community.general/pull/4516).
- - github_deploy_key - removed the deprecated alias ``2fa_token`` of ``otp``
- (https://github.com/ansible-collections/community.general/pull/4516).
- - homebrew, homebrew_cask - removed the deprecated alias ``update-brew`` of
- ``update_brew`` (https://github.com/ansible-collections/community.general/pull/4516).
- - linode - removed the ``backupsenabled`` option. Use ``backupweeklyday`` or
- ``backupwindow`` to enable backups (https://github.com/ansible-collections/community.general/pull/4516).
- - opkg - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516).
- - pacman - if ``update_cache=true`` is used with ``name`` or ``upgrade``, the
- changed state will now also indicate if only the cache was updated. To keep
- the old behavior - only indicate ``changed`` when a package was installed/upgraded
- -, use ``changed_when`` as indicated in the module examples (https://github.com/ansible-collections/community.general/pull/4516).
- - pacman - removed the deprecated alias ``update-cache`` of ``update_cache``
- (https://github.com/ansible-collections/community.general/pull/4516).
- - proxmox, proxmox_kvm, proxmox_snap - no longer allow to specify a VM name
- that matches multiple VMs. If this happens, the modules now fail (https://github.com/ansible-collections/community.general/pull/4516).
- - serverless - removed the ``functions`` option. It was not used by the module
- (https://github.com/ansible-collections/community.general/pull/4516).
- - slackpkg - removed the deprecated alias ``update-cache`` of ``update_cache``
- (https://github.com/ansible-collections/community.general/pull/4516).
- - urpmi - removed the deprecated alias ``no-recommends`` of ``no_recommends``
- (https://github.com/ansible-collections/community.general/pull/4516).
- - urpmi - removed the deprecated alias ``update-cache`` of ``update_cache``
- (https://github.com/ansible-collections/community.general/pull/4516).
- - xbps - removed the deprecated alias ``update-cache`` of ``update_cache`` (https://github.com/ansible-collections/community.general/pull/4516).
- - xfconf - the ``get`` state has been removed. Use the ``xfconf_info`` module
- instead (https://github.com/ansible-collections/community.general/pull/4516).
- fragments:
- - 1088-add_multiple_ipv6_address_support.yml
- - 1088-nmcli_add_multiple_addresses_support.yml
- - 2386-github_repo-fix-idempotency-issues.yml
- - 3295-mksysb-revamp.yaml
- - 3357-nmcli-eui64-and-ipv6privacy.yml
- - 3374-add-ipa-ptr-sync-support.yml
- - 3519-inventory-support-lxd-4.yml
- - 3625-nmcli_false_changed_mtu_fix.yml
- - 3632-add-redfish-host-interface-config-support.yml
- - 3660-a_module-tombstone.yml
- - 3661-lxd_container-add-vm-support.yml
- - 3667-ldap_search.yml
- - 3675-xattr-handle-base64-values.yml
- - 3681-lvol-fix-create.yml
- - 3693-add-redfish-host-interface-info-support.yml
- - 3694-gitlab-cleanup.yml
- - 3702-ipmi-encryption-key.yml
- - 3703-force-install-homebrew-cask.yml
- - 3708-listen_ports_facts-add-ss-support.yml
- - 3709-support-batch-mode.yml
- - 3726-terraform-missing-parameters-planned-fix.yml
- - 3758-pacman-add-stdout-stderr.yml
- - 3765-extend-open_iscsi-with-rescan.yml
- - 3768-nmcli_fix_changed_when_no_mask_set.yml
- - 3780-add-keycloak-sssd-user-federation.yml
- - 3785-python_requirements_info-versionless-op.yaml
- - 3792-improve_gitlab_group_and_project.yml
- - 3797-python_requirements_info-improvements.yaml
- - 3798-fix-lxd-connection-option-vars-support.yml
- - 3800-pipx-include-apps.yaml
- - 3801-mh-deprecate-vardict-attr.yaml
- - 3805-iso_extract-run_command-list.yaml
- - 3806-xattr-run_command-list.yaml
- - 3807-logentries-run_command-list.yaml
- - 3808-logstash_plugin-run_command-list.yaml
- - 3821-monit-run-list.yaml
- - 3822-ip_netns-run-list.yaml
- - 3829-svc-run-list.yaml
- - 3833-aix_filesystem-run-list.yaml
- - 3834-aix-lvg-run-list.yaml
- - 3835-java-cert-run-list.yaml
- - 3837-opentelemetry_plugin-honour_ignore_errors.yaml
- - 3838-jira-token.yaml
- - 3840-hponcfg-mh-revamp.yaml
- - 3849-mh-check-mode-decos.yaml
- - 3851-lxc-container-run-list.yaml
- - 3862-interfaces-file-fix-dup-option.yaml
- - 3867-jira-fix-body.yaml
- - 3874-proxmox-fix-onboot-param.yml
- - 3875-icinga2-inv-fix.yml
- - 3896-nmcli_vlan_missing_options.yaml
- - 3907-pacman-speedup.yml
- - 3909-nrdp_fix_string_args_without_encoding.yaml
- - 3916-fix-vdo-options-type.yml
- - 3919-xfconf-baseclass.yaml
- - 3921-add-counter-filter-plugin.yml
- - 3930-proxmox-add-clone.yaml
- - 3933-slack-charset-header.yaml
- - 3934-distutils.yml
- - 3935-use-gitlab-instance-runner-to-create-runner.yml
- - 3936-distutils.version.yml
- - 3940_fix_contenttype_scaleway_user_data.yml
- - 3943-add-option-options-to-snap-module.yml
- - 3946-mattermost_attachments.yml
- - 3951-scaleway_compute_add_project_id.yml
- - 3964-scaleway_volume_add_region.yml
- - 3968-ipmi_power-add-machine-option.yaml
- - 3976-fix-alternatives-parsing.yml
- - 3980-puppet-show_diff.yml
- - 3985-nmcli-add-wireguard-connection-type.yml
- - 3988-distutils-vendor-removed.yml
- - 4026-fix-mail-callback.yml
- - 4029-proxmox-refactor.yml
- - 4030-proxmox-has-proxmoxer.yml
- - 4036-onevm-add-release-action.yaml
- - 4038-fix-and-rework-gitlb-project-variable.yml
- - 4039-cluster-container-wait.yml
- - 4040-linode-token-templating.yaml
- - 4043-fix-no-log-opentelemetry.yml
- - 4048-expand-tilde-in-yarn-global-install-folder.yaml
- - 4049-profile-for-scaleway-inventory.yml
- - 4050-properly-parse-json-lines-output-from-yarn.yaml
- - 4052-fix-detection-of-installed-cargo-packages-with-hyphens.yaml
- - 4056-add-missing-mail-headers.yml
- - 4058-lists_mergeby-add-parameters.yml
- - 4061-fix-mail-recipient-encoding.yml
- - 4062-nmcli-ipv6-routes-support.yml
- - 4068-add-include_file-option.yml
- - 4078-python_requirements_info.yaml
- - 4084-add-redfish-system-indicator-led.yml
- - 4086-rework_of_gitlab_proyect_variable_over_gitlab_group_variable.yml
- - 4088-add-constructed-interface-for-icinga2-inventory.yml
- - 4092-fix_local_ports_regex_listen_ports_facts.yaml
- - 4104-opentelemetry_plugin-enrich_docker_login.yaml
- - 4105-opentelemetry_plugin-enrich_jira_hetzner_jenkins_services.yaml
- - 4106-proxmox-efidisk0-support.yaml
- - 4108-nmcli-support-modifcation-without-type-param.yml
- - 4131-nmcli_fix_reports_changed_for_routes4_parameter.yml
- - 4136-gitlab_runner-make-project-owned-mutually-exclusive.yml
- - 4140-mail-callback-options.yml
- - 4150-gitlab-project-variable-absent-fix.yml
- - 4151-dconf-catch-psutil-nosuchprocess.yaml
- - 4154-ini_file_changed.yml
- - 4168-add-keycloak-url-timeout.yml
- - 4179-linode-inventory-cache.yaml
- - 4183-fix-yum_versionlock.yaml
- - 4191-proxmox-add-win11.yml
- - 4192-improve-passwordstore-consistency.yml
- - 4192-zypper-add-clean-deps.yml
- - 4194-configurable-passwordstore-locking.yml
- - 4206-imc-rest-module.yaml
- - 4207-add-redis-tls-support.yml
- - 4212-fixes-for-keycloak-user-federation.yml
- - 4219-passwordstore-locale-fix.yml
- - 4223-syslog-json-skip-syslog-option.yml
- - 4232-text-converter-import.yml
- - 4240-ansible_galaxy_install-no_deps.yml
- - 4275-pacman-sysupgrade.yml
- - 4286-pacman-url-pkgs.yml
- - 4287-fix-proxmox-vm-chek.yml
- - 4288-fix-4259-support-busybox-dd.yml
- - 4299-npm-add-production-with-ci-flag.yml
- - 4303-pipx-editable.yml
- - 4304-jira-fields-in-comment.yml
- - 4306-proxmox-fix-error-on-vm-clone.yml
- - 4312-pacman-groups.yml
- - 4316-pacman-remove-nosave.yml
- - 4318-pacman-restore-old-changed-behavior.yml
- - 4320-nmcli-hairpin.yml
- - 4330-pacman-packages-update_cache.yml
- - 4336-linode-inventory-filtering.yaml
- - 4337-pacman-update_cache.yml
- - 4349-proxmox-inventory-dict-facts.yml
- - 4351-inventory-lxd-handling_metadata_wo_os_and_release.yml
- - 4352-proxmox-inventory-filters.yml
- - 4355-ldap-recursive-delete.yml
- - 4377-allow-proxmox-snapshot-restoring.yml
- - 4378-proxmox-inventory-tags.yml
- - 4380-sudoers-runas-parameter.yml
- - 4382-keycloak-add-missing-validate_certs-parameters.yml
- - 4385-keycloak-client-default-optional-scopes.yml
- - 4386-proxmox-support-templating-in-inventory-file.yml
- - 4417-ipa_service-add-skip_host_check.yml
- - 4421-zypper_package_version_handling_fix.yml
- - 4422-warn-user-if-incorrect-SDK-version-is-installed.yaml
- - 4429-keycloak-client-add-always-display-in-console.yml
- - 4438-fix-error-message.yaml
- - 4455-terraform-provider-upgrade.yml
- - 4457-support-datadog-monitors-type-event-v2.yaml
- - 4459-only-get-monitor-if-it-is-not-null-api-response.yaml
- - 4464-pacman-fix-local-remove.yaml
- - 4465-btrfs-resize.yml
- - 4466-proxmox-ansible_host-deprecation.yml
- - 4471-seport-refactor.yaml
- - 4476-cmd_runner.yml
- - 4479-add-project-support-for-lxd_container-and-lxd_profile.yml
- - 4491-specify_all_in_list_calls.yaml
- - 4492-proxmox_kvm_fix_vm_without_name.yaml
- - 4496-remove-deprecated-method-in-gitlab-branch-module.yml
- - 4506-sudo-in-nmap-inv-plugin.yaml
- - 4516-deprecation-removals.yml
- - 4517-gem-deprecate-norc.yml
- - 4524-update-opennebula-inventory-plugin-to-match-documentation.yaml
- - 4526-keycloak-realm-types.yaml
- - 4530-fix-unauthorized-pritunl-request.yaml
- - 4534-xfconf-added-value-types.yaml
- - 4540-proxmox-inventory-token-auth.yml
- - 4548-remove-2.9-2.10-compatibility.yml
- - 4555-proxmox-lxc-key.yml
- - 4556-remove-default-none-1.yml
- - 4557-alternatives-add-state-parameter.yml
- - 4567-remove-default-none-2.yml
- - 4580-vmadm-deprecate-param-debug.yaml
- - 5.0.0-a1.yml
- - 705-gitlab-auth-support.yml
- plugins:
- filter:
- - description: Counts hashable elements in a sequence
- name: counter
- namespace: null
- release_date: '2022-04-29'
- 5.0.1:
- changes:
- bugfixes:
- - consul - fixed bug introduced in PR 4590 (https://github.com/ansible-collections/community.general/issues/4680).
- - filesystem - handle ``fatresize --info`` output lines without ``:`` (https://github.com/ansible-collections/community.general/pull/4700).
- - filesystem - improve error messages when output cannot be parsed by including
- newlines in escaped form (https://github.com/ansible-collections/community.general/pull/4700).
- - keycloak_realm - fix default groups and roles (https://github.com/ansible-collections/community.general/issues/4241).
- - redis* modules - fix call to ``module.fail_json`` when failing because of
- missing Python libraries (https://github.com/ansible-collections/community.general/pull/4733).
- - xcc_redfish_command - for compatibility due to Redfish spec changes the virtualMedia
- resource location changed from Manager to System (https://github.com/ansible-collections/community.general/pull/4682).
- - zfs - fix wrong quoting of properties (https://github.com/ansible-collections/community.general/issues/4707,
- https://github.com/ansible-collections/community.general/pull/4726).
- minor_changes:
- - cpanm - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived
- modules (https://github.com/ansible-collections/community.general/pull/4674).
- - mksysb - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived
- modules (https://github.com/ansible-collections/community.general/pull/4674).
- - pipx - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived
- modules (https://github.com/ansible-collections/community.general/pull/4674).
- - snap - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived
- modules (https://github.com/ansible-collections/community.general/pull/4674).
- - xfconf - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived
- modules (https://github.com/ansible-collections/community.general/pull/4674).
- release_summary: Regular bugfix release for inclusion in Ansible 6.0.0.
- fragments:
- - 4674-use-mh-raise.yaml
- - 4682-compatibility-virtualmedia-resource-location.yaml
- - 4700-code-changes.yml
- - 4712-consul-bugfix.yaml
- - 4719-fix-keycloak-realm.yaml
- - 4726-zfs.yml
- - 4733-redis-fail.yml
- - 5.0.1.yml
- release_date: '2022-05-30'
- 5.0.2:
- changes:
- bugfixes:
- - Include ``simplified_bsd.txt`` license file for various module utils, the
- ``lxca_common`` docs fragment, and the ``utm_utils`` unit tests.
- release_summary: Maintenance and bugfix release for Ansible 6.0.0.
- fragments:
- - 5.0.2.yml
- - simplified-bsd-license.yml
- release_date: '2022-06-06'
- 5.1.0:
- changes:
- deprecated_features:
- - cmd_runner module utils - deprecated ``fmt`` in favour of ``cmd_runner_fmt``
- as the parameter format object (https://github.com/ansible-collections/community.general/pull/4777).
- minor_changes:
- - ModuleHelper module utils - improved ``ModuleHelperException``, using ``to_native()``
- for the exception message (https://github.com/ansible-collections/community.general/pull/4755).
- - alternatives - add ``state=absent`` to be able to remove an alternative (https://github.com/ansible-collections/community.general/pull/4654).
- - alternatives - add ``subcommands`` parameter (https://github.com/ansible-collections/community.general/pull/4654).
- - ansible_galaxy_install - minor refactoring using latest ``ModuleHelper`` updates
- (https://github.com/ansible-collections/community.general/pull/4752).
- - cmd_runner module util - added parameters ``check_mode_skip`` and ``check_mode_return``
- to ``CmdRunner.context()``, so that the command is not executed when ``check_mode=True``
- (https://github.com/ansible-collections/community.general/pull/4736).
- - nmcli - adds ``vpn`` type and parameter for supporting VPN with service type
- L2TP and PPTP (https://github.com/ansible-collections/community.general/pull/4746).
- - proxmox inventory plugin - added new flag ``qemu_extended_statuses`` and new
- groups ``prelaunch``, ``paused``. They will be
- populated only when ``want_facts=true``, ``qemu_extended_statuses=true`` and
- only for ``QEMU`` machines (https://github.com/ansible-collections/community.general/pull/4723).
- - puppet - adds ``confdir`` parameter to configure a custom confir location
- (https://github.com/ansible-collections/community.general/pull/4740).
- - xfconf - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776).
- - xfconf module utils - created new module util ``xfconf`` providing a ``cmd_runner``
- specific for ``xfconf`` modules (https://github.com/ansible-collections/community.general/pull/4776).
- - xfconf_info - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 4654-alternatives-add-subcommands.yml
- - 4724-proxmox-qemu-extend.yaml
- - 4736-cmd-runner-skip-if-check.yml
- - 4740-puppet-feature.yaml
- - 4746-add-vpn-support-nmcli.yaml
- - 4752-ansible-galaxy-install-mh-updates.yml
- - 4755-mhexception-improvement.yml
- - 4776-xfconf-cmd-runner.yaml
- - 4777-cmd-runner-deprecate-fmt.yaml
- - 5.1.0.yml
- modules:
- - description: Retrieve GConf configurations
- name: gconftool2_info
- namespace: system
- release_date: '2022-06-07'
- 5.1.1:
- changes:
- bugfixes:
- - alternatives - do not set the priority if the priority was not set by the
- user (https://github.com/ansible-collections/community.general/pull/4810).
- - alternatives - only pass subcommands when they are specified as module arguments
- (https://github.com/ansible-collections/community.general/issues/4803, https://github.com/ansible-collections/community.general/issues/4804,
- https://github.com/ansible-collections/community.general/pull/4836).
- - alternatives - when ``subcommands`` is specified, ``link`` must be given for
- every subcommand. This was already mentioned in the documentation, but not
- enforced by the code (https://github.com/ansible-collections/community.general/pull/4836).
- - nmcli - fix error caused by adding undefined module arguments for list options
- (https://github.com/ansible-collections/community.general/issues/4373, https://github.com/ansible-collections/community.general/pull/4813).
- - proxmox inventory plugin - fixed extended status detection for qemu (https://github.com/ansible-collections/community.general/pull/4816).
- - redhat_subscription - fix unsubscribing on RHEL 9 (https://github.com/ansible-collections/community.general/issues/4741).
- - sudoers - ensure sudoers config files are created with the permissions requested
- by sudoers (0440) (https://github.com/ansible-collections/community.general/pull/4814).
- release_summary: Bugfix release.
- fragments:
- - 4809-redhat_subscription-unsubscribe.yaml
- - 4810-alternatives-bug.yml
- - 4813-fix-nmcli-convert-list.yaml
- - 4814-sudoers-file-permissions.yml
- - 4816-proxmox-fix-extended-status.yaml
- - 4836-alternatives.yml
- - 5.1.1.yml
- release_date: '2022-06-14'
- 5.2.0:
- changes:
- bugfixes:
- - Include ``PSF-license.txt`` file for ``plugins/module_utils/_mount.py``.
- - redfish_command - fix the check if a virtual media is unmounted to just check
- for ``instered= false`` caused by Supermicro hardware that does not clear
- the ``ImageName`` (https://github.com/ansible-collections/community.general/pull/4839).
- - redfish_command - the Supermicro Redfish implementation only supports the
- ``image_url`` parameter in the underlying API calls to ``VirtualMediaInsert``
- and ``VirtualMediaEject``. Any values set (or the defaults) for ``write_protected``
- or ``inserted`` will be ignored (https://github.com/ansible-collections/community.general/pull/4839).
- - 'sudoers - fix incorrect handling of ``state: absent`` (https://github.com/ansible-collections/community.general/issues/4852).'
- minor_changes:
- - cmd_runner module utils - add ``__call__`` method to invoke context (https://github.com/ansible-collections/community.general/pull/4791).
- - passwordstore lookup plugin - allow using alternative password managers by
- detecting wrapper scripts, allow explicit configuration of pass and gopass
- backends (https://github.com/ansible-collections/community.general/issues/4766).
- - sudoers - will attempt to validate the proposed sudoers rule using visudo
- if available, optionally skipped, or required (https://github.com/ansible-collections/community.general/pull/4794,
- https://github.com/ansible-collections/community.general/issues/4745).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 4780-passwordstore-wrapper-compat.yml
- - 4791-cmd-runner-callable.yaml
- - 4794-sudoers-validation.yml
- - 4839-fix-VirtualMediaInsert-Supermicro.yml
- - 4852-sudoers-state-absent.yml
- - 5.2.0.yml
- - psf-license.yml
- modules:
- - description: Set or delete a passphrase using the Operating System's native
- keyring
- name: keyring
- namespace: system
- - description: Get a passphrase using the Operating System's native keyring
- name: keyring_info
- namespace: system
- - description: Scaleway compute - private network management
- name: scaleway_compute_private_network
- namespace: cloud.scaleway
- release_date: '2022-06-21'
- 5.3.0:
- changes:
- bugfixes:
- - cmd_runner module utils - fix bug caused by using the ``command`` variable
- instead of ``self.command`` when looking for binary path (https://github.com/ansible-collections/community.general/pull/4903).
- - dsv lookup plugin - do not ignore the ``tld`` parameter (https://github.com/ansible-collections/community.general/pull/4911).
- - lxd connection plugin - fix incorrect ``inventory_hostname`` in ``remote_addr``.
- This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/issues/4886).
- - proxmox inventory plugin - fix crash when ``enabled=1`` is used in agent config
- string (https://github.com/ansible-collections/community.general/pull/4910).
- - rax_clb_nodes - fix code to be compatible with Python 3 (https://github.com/ansible-collections/community.general/pull/4933).
- - redfish_info - fix to ``GetChassisPower`` to correctly report power information
- when multiple chassis exist, but not all chassis report power information
- (https://github.com/ansible-collections/community.general/issues/4901).
- minor_changes:
- - machinectl become plugin - can now be used with a password from another user
- than root, if a polkit rule is present (https://github.com/ansible-collections/community.general/pull/4849).
- - opentelemetry callback plugin - allow configuring opentelementry callback
- via config file (https://github.com/ansible-collections/community.general/pull/4916).
- - redfish_info - add ``GetManagerInventory`` to report list of Manager inventory
- information (https://github.com/ansible-collections/community.general/issues/4899).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 4849-add-password-prompt-support-for-machinectl.yml
- - 4886-fix-lxd-inventory-hostname.yml
- - 4899-add-GetManagerInventory-for-redfish_info.yml
- - 4901-fix-redfish-chassispower.yml
- - 4903-cmdrunner-bugfix.yaml
- - 4910-fix-for-agent-enabled.yml
- - 4911-dsv-honor-tld-option.yml
- - 4916-opentelemetry-ini-options.yaml
- - 4933-fix-rax-clb-nodes.yaml
- - 5.3.0.yml
- release_date: '2022-07-12'
- 5.4.0:
- changes:
- bugfixes:
- - keyring_info - fix the result from the keyring library never getting returned
- (https://github.com/ansible-collections/community.general/pull/4964).
- - pacman - fixed name resolution of URL packages (https://github.com/ansible-collections/community.general/pull/4959).
- - passwordstore lookup plugin - fix ``returnall`` for gopass (https://github.com/ansible-collections/community.general/pull/5027).
- - passwordstore lookup plugin - fix password store path detection for gopass
- (https://github.com/ansible-collections/community.general/pull/4955).
- - proxmox - fix error handling when getting VM by name when ``state=absent``
- (https://github.com/ansible-collections/community.general/pull/4945).
- - proxmox_kvm - fix error handling when getting VM by name when ``state=absent``
- (https://github.com/ansible-collections/community.general/pull/4945).
- - slack - fix incorrect channel prefix ``#`` caused by incomplete pattern detection
- by adding ``G0`` and ``GF`` as channel ID patterns (https://github.com/ansible-collections/community.general/pull/5019).
- - xfconf - fix setting of boolean values (https://github.com/ansible-collections/community.general/issues/4999,
- https://github.com/ansible-collections/community.general/pull/5007).
- minor_changes:
- - ModuleHelper module utils - added property ``verbosity`` to base class (https://github.com/ansible-collections/community.general/pull/5035).
- - apk - add ``world`` parameter for supporting a custom world file (https://github.com/ansible-collections/community.general/pull/4976).
- - consul - adds ``ttl`` parameter for session (https://github.com/ansible-collections/community.general/pull/4996).
- - dig lookup plugin - add option ``fail_on_error`` to allow stopping execution
- on lookup failures (https://github.com/ansible-collections/community.general/pull/4973).
- - keycloak_* modules - add ``http_agent`` parameter with default value ``Ansible``
- (https://github.com/ansible-collections/community.general/issues/5023).
- - lastpass - use config manager for handling plugin options (https://github.com/ansible-collections/community.general/pull/5022).
- - listen_ports_facts - add new ``include_non_listening`` option which adds ``-a``
- option to ``netstat`` and ``ss``. This shows both listening and non-listening
- (for TCP this means established connections) sockets, and returns ``state``
- and ``foreign_address`` (https://github.com/ansible-collections/community.general/issues/4762,
- https://github.com/ansible-collections/community.general/pull/4953).
- - maven_artifact - add a new ``unredirected_headers`` option that can be used
- with ansible-core 2.12 and above. The default value is to not use ``Authorization``
- and ``Cookie`` headers on redirects for security reasons. With ansible-core
- 2.11, all headers are still passed on for redirects (https://github.com/ansible-collections/community.general/pull/4812).
- - pacman - added parameters ``reason`` and ``reason_for`` to set/change the
- install reason of packages (https://github.com/ansible-collections/community.general/pull/4956).
- - xfconf - add ``stdout``, ``stderr`` and ``cmd`` to the module results (https://github.com/ansible-collections/community.general/pull/5037).
- - xfconf - use ``do_raise()`` instead of defining custom exception class (https://github.com/ansible-collections/community.general/pull/4975).
- - xfconf_info - use ``do_raise()`` instead of defining custom exception class
- (https://github.com/ansible-collections/community.general/pull/4975).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 4812-expose-unredirected-headers.yml
- - 4945-fix-get_vm-int-parse-handling.yaml
- - 4953-listen-ports-facts-extend-output.yaml
- - 4955-fix-path-detection-for-gopass.yaml
- - 4956-pacman-install-reason.yaml
- - 4959-pacman-fix-url-packages-name.yaml
- - 4964-fix-keyring-info.yml
- - 4973-introduce-dig-lookup-argument.yaml
- - 4975-xfconf-use-do-raise.yaml
- - 4976-apk-add-support-for-a-custom-world-file.yaml
- - 4996-consul-session-ttl.yml
- - 4999-xfconf-bool.yml
- - 5.4.0.yml
- - 5019-slack-support-more-groups.yml
- - 5022-lastpass-lookup-cleanup.yml
- - 5023-http-agent-param-keycloak.yml
- - 5027-fix-returnall-for-gopass.yaml
- - 5035-mh-base-verbosity.yaml
- - 5037-xfconf-add-cmd-output.yaml
- modules:
- - description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish
- APIs
- name: wdc_redfish_command
- namespace: remote_management.redfish
- - description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish
- APIs
- name: wdc_redfish_info
- namespace: remote_management.redfish
- plugins:
- lookup:
- - description: Retrieve secrets from Bitwarden
- name: bitwarden
- namespace: null
- release_date: '2022-08-02'
- 5.5.0:
- changes:
- bugfixes:
- - apache2_mod_proxy - avoid crash when reporting inability to parse balancer_member_page
- HTML caused by using an undefined variable in the error message (https://github.com/ansible-collections/community.general/pull/5111).
- - dig lookup plugin - fix evaluation of falsy values for boolean parameters
- ``fail_on_error`` and ``retry_servfail`` (https://github.com/ansible-collections/community.general/pull/5129).
- - dnsimple_info - correctly report missing library as ``requests`` and not ``another_library``
- (https://github.com/ansible-collections/community.general/pull/5111).
- - funcd connection plugin - fix signature of ``exec_command`` (https://github.com/ansible-collections/community.general/pull/5111).
- - manageiq_alert_profiles - avoid crash when reporting unknown profile caused
- by trying to return an undefined variable (https://github.com/ansible-collections/community.general/pull/5111).
- - nsupdate - compatibility with NS records (https://github.com/ansible-collections/community.general/pull/5112).
- - packet_ip_subnet - fix error reporting in case of invalid CIDR prefix lengths
- (https://github.com/ansible-collections/community.general/pull/5111).
- - pip_package_info - remove usage of global variable (https://github.com/ansible-collections/community.general/pull/5111).
- - proxmox_kvm - fix wrong condition (https://github.com/ansible-collections/community.general/pull/5108).
- minor_changes:
- - Added MIT license as ``LICENSES/MIT.txt`` for tests/unit/plugins/modules/packaging/language/test_gem.py
- (https://github.com/ansible-collections/community.general/pull/5065).
- - All software licenses are now in the ``LICENSES/`` directory of the collection
- root (https://github.com/ansible-collections/community.general/pull/5065,
- https://github.com/ansible-collections/community.general/pull/5079, https://github.com/ansible-collections/community.general/pull/5080,
- https://github.com/ansible-collections/community.general/pull/5083, https://github.com/ansible-collections/community.general/pull/5087,
- https://github.com/ansible-collections/community.general/pull/5095, https://github.com/ansible-collections/community.general/pull/5098,
- https://github.com/ansible-collections/community.general/pull/5106).
- - The collection repository conforms to the `REUSE specification `__
- except for the changelog fragments (https://github.com/ansible-collections/community.general/pull/5138).
- - pipx - added state ``latest`` to the module (https://github.com/ansible-collections/community.general/pull/5105).
- - pipx - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/5085).
- - pipx - module fails faster when ``name`` is missing for states ``upgrade``
- and ``reinstall`` (https://github.com/ansible-collections/community.general/pull/5100).
- - pipx module utils - created new module util ``pipx`` providing a ``cmd_runner``
- specific for the ``pipx`` module (https://github.com/ansible-collections/community.general/pull/5085).
- - proxmox_kvm - allow ``agent`` argument to be a string (https://github.com/ansible-collections/community.general/pull/5107).
- - wdc_redfish_command - add ``IndicatorLedOn`` and ``IndicatorLedOff`` commands
- for ``Chassis`` category (https://github.com/ansible-collections/community.general/pull/5059).
- release_summary: Feature and bugfix release.
- fragments:
- - 5.5.0.yml
- - 5059-wdc_redfish_command-indicator-leds.yml
- - 5085-pipx-use-cmd-runner.yaml
- - 5100-pipx-req-if.yaml
- - 5105-pipx-state-latest.yaml
- - 5107-proxmox-agent-argument.yaml
- - 5108-proxmox-node-name-condition.yml
- - 5111-fixes.yml
- - 5112-fix-nsupdate-ns-entry.yaml
- - 5129-dig-boolean-params-fix.yml
- - licenses-2.yml
- - licenses.yml
- release_date: '2022-08-23'
+---
+ancestor: 11.0.0
+releases: {}
diff --git a/changelogs/config.yaml b/changelogs/config.yaml
index 52e101e11f..578b8c3765 100644
--- a/changelogs/config.yaml
+++ b/changelogs/config.yaml
@@ -7,28 +7,37 @@ changelog_filename_template: ../CHANGELOG.rst
changelog_filename_version_depth: 0
changes_file: changelog.yaml
changes_format: combined
+ignore_other_fragment_extensions: true
keep_fragments: false
mention_ancestor: true
-flatmap: true
new_plugins_after_name: removed_features
notesdir: fragments
+output_formats:
+ - md
+ - rst
prelude_section_name: release_summary
prelude_section_title: Release Summary
sections:
-- - major_changes
- - Major Changes
-- - minor_changes
- - Minor Changes
-- - breaking_changes
- - Breaking Changes / Porting Guide
-- - deprecated_features
- - Deprecated Features
-- - removed_features
- - Removed Features (previously deprecated)
-- - security_fixes
- - Security Fixes
-- - bugfixes
- - Bugfixes
-- - known_issues
- - Known Issues
+ - - major_changes
+ - Major Changes
+ - - minor_changes
+ - Minor Changes
+ - - breaking_changes
+ - Breaking Changes / Porting Guide
+ - - deprecated_features
+ - Deprecated Features
+ - - removed_features
+ - Removed Features (previously deprecated)
+ - - security_fixes
+ - Security Fixes
+ - - bugfixes
+ - Bugfixes
+ - - known_issues
+ - Known Issues
title: Community General
+trivial_section_name: trivial
+use_fqcn: true
+add_plugin_period: true
+changelog_nice_yaml: true
+changelog_sort: version
+vcs: auto
diff --git a/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml
new file mode 100644
index 0000000000..d1cfee7816
--- /dev/null
+++ b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml
@@ -0,0 +1,7 @@
+deprecated_features:
+ - pacemaker_cluster - the parameter ``state`` will become a required parameter in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/10227).
+
+minor_changes:
+ - pacemaker_cluster - add ``state=maintenance`` for managing pacemaker maintenance mode (https://github.com/ansible-collections/community.general/issues/10200, https://github.com/ansible-collections/community.general/pull/10227).
+ - pacemaker_cluster - rename ``node`` to ``name`` and add ``node`` alias (https://github.com/ansible-collections/community.general/pull/10227).
+ - pacemaker_resource - enhance module by removing duplicative code (https://github.com/ansible-collections/community.general/pull/10227).
diff --git a/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml
new file mode 100644
index 0000000000..eec12e8669
--- /dev/null
+++ b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - keycloak - add support for ``grant_type=client_credentials`` to all keycloak modules, so that specifying ``auth_client_id`` and ``auth_client_secret`` is sufficient for authentication (https://github.com/ansible-collections/community.general/pull/10231).
diff --git a/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml
new file mode 100644
index 0000000000..29d71ca393
--- /dev/null
+++ b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - cloudflare_dns - adds support for PTR records (https://github.com/ansible-collections/community.general/pull/10267).
diff --git a/changelogs/fragments/10269-cloudflare-dns-refactor.yml b/changelogs/fragments/10269-cloudflare-dns-refactor.yml
new file mode 100644
index 0000000000..9f91040d63
--- /dev/null
+++ b/changelogs/fragments/10269-cloudflare-dns-refactor.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - cloudflare_dns - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10269).
diff --git a/changelogs/fragments/10271--disable_lookups.yml b/changelogs/fragments/10271--disable_lookups.yml
new file mode 100644
index 0000000000..d28e2ac833
--- /dev/null
+++ b/changelogs/fragments/10271--disable_lookups.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - "icinga2 inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)."
+ - "linode inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)."
diff --git a/changelogs/fragments/10285-fstr-plugins.yml b/changelogs/fragments/10285-fstr-plugins.yml
new file mode 100644
index 0000000000..6fff590fee
--- /dev/null
+++ b/changelogs/fragments/10285-fstr-plugins.yml
@@ -0,0 +1,7 @@
+minor_changes:
+ - dense callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - mail callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - wsl connection plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - jc filter plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - iocage inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - xen_orchestra inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
diff --git a/changelogs/fragments/10299-github_app_access_token-lookup.yml b/changelogs/fragments/10299-github_app_access_token-lookup.yml
new file mode 100644
index 0000000000..59233e2a05
--- /dev/null
+++ b/changelogs/fragments/10299-github_app_access_token-lookup.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - github_app_access_token lookup plugin - support both ``jwt`` and ``pyjwt`` to avoid conflict with other modules requirements (https://github.com/ansible-collections/community.general/issues/10299).
diff --git a/changelogs/fragments/10311-xfconf-refactor.yml b/changelogs/fragments/10311-xfconf-refactor.yml
new file mode 100644
index 0000000000..9d71bd17d8
--- /dev/null
+++ b/changelogs/fragments/10311-xfconf-refactor.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - xfconf - minor adjustments to the code (https://github.com/ansible-collections/community.general/pull/10311).
diff --git a/changelogs/fragments/10323-nmcli-improvements.yml b/changelogs/fragments/10323-nmcli-improvements.yml
new file mode 100644
index 0000000000..53436ea7d6
--- /dev/null
+++ b/changelogs/fragments/10323-nmcli-improvements.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - nmcli - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10323).
diff --git a/changelogs/fragments/10328-redundant-brackets.yml b/changelogs/fragments/10328-redundant-brackets.yml
new file mode 100644
index 0000000000..f8f74a336c
--- /dev/null
+++ b/changelogs/fragments/10328-redundant-brackets.yml
@@ -0,0 +1,32 @@
+minor_changes:
+ - logstash callback plugin - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - keycloak module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - python_runner module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - cloudflare_dns - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - crypttab - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - datadog_monitor - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_deploy_key - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_group_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_hook - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_project_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_runner - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - ipa_group - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - jenkins_build - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - jenkins_build_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - nmcli - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - oneandone_firewall_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - oneandone_load_balancer - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - oneandone_monitoring_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - onepassword_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - osx_defaults - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - ovh_ip_loadbalancing_backend - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - packet_device - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - pagerduty - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - pingdom - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - rhevm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - rocketchat - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - sensu_silence - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - sl_vm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - urpmi - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - xattr - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - xml - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
diff --git a/changelogs/fragments/10329-catapult-deprecation.yml b/changelogs/fragments/10329-catapult-deprecation.yml
new file mode 100644
index 0000000000..5e5209edda
--- /dev/null
+++ b/changelogs/fragments/10329-catapult-deprecation.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+ - catapult - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10329).
diff --git a/changelogs/fragments/10339-github_app_access_token.yml b/changelogs/fragments/10339-github_app_access_token.yml
new file mode 100644
index 0000000000..00cd71f559
--- /dev/null
+++ b/changelogs/fragments/10339-github_app_access_token.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - github_release - support multiple types of GitHub tokens; no longer failing when ``ghs_`` token type is provided (https://github.com/ansible-collections/community.general/issues/10338, https://github.com/ansible-collections/community.general/pull/10339).
diff --git a/changelogs/fragments/10346-jenkins-plugins-fixes.yml b/changelogs/fragments/10346-jenkins-plugins-fixes.yml
new file mode 100644
index 0000000000..382fe7aa53
--- /dev/null
+++ b/changelogs/fragments/10346-jenkins-plugins-fixes.yml
@@ -0,0 +1,6 @@
+bugfixes:
+ - "jenkins_plugin - install latest compatible version instead of latest (https://github.com/ansible-collections/community.general/issues/854, https://github.com/ansible-collections/community.general/pull/10346)."
+ - "jenkins_plugin - separate Jenkins and external URL credentials (https://github.com/ansible-collections/community.general/issues/4419, https://github.com/ansible-collections/community.general/pull/10346)."
+
+minor_changes:
+ - "jenkins_plugin - install dependencies for specific version (https://github.com/ansible-collections/community.general/issues/4995, https://github.com/ansible-collections/community.general/pull/10346)."
diff --git a/changelogs/fragments/10349-incus_connection-error-handling.yml b/changelogs/fragments/10349-incus_connection-error-handling.yml
new file mode 100644
index 0000000000..b35da354d2
--- /dev/null
+++ b/changelogs/fragments/10349-incus_connection-error-handling.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - incus connection plugin - fix error handling to return more useful Ansible errors to the user (https://github.com/ansible-collections/community.general/issues/10344, https://github.com/ansible-collections/community.general/pull/10349).
diff --git a/changelogs/fragments/10359-dependent.yml b/changelogs/fragments/10359-dependent.yml
new file mode 100644
index 0000000000..e48a6142e8
--- /dev/null
+++ b/changelogs/fragments/10359-dependent.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "dependent lookup plugin - avoid deprecated ansible-core 2.19 functionality (https://github.com/ansible-collections/community.general/pull/10359)."
diff --git a/changelogs/fragments/10413-pacemaker-resource-cleanup.yml b/changelogs/fragments/10413-pacemaker-resource-cleanup.yml
new file mode 100644
index 0000000000..f4157559cc
--- /dev/null
+++ b/changelogs/fragments/10413-pacemaker-resource-cleanup.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - pacemaker_resource - add ``state=cleanup`` for cleaning up pacemaker resources (https://github.com/ansible-collections/community.general/pull/10413).
+ - pacemaker_resource - the parameter ``name`` is no longer a required parameter in community.general 11.3.0 (https://github.com/ansible-collections/community.general/pull/10413).
diff --git a/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml b/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml
new file mode 100644
index 0000000000..22433b584e
--- /dev/null
+++ b/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - keycloak_realm - add support for ``brute_force_strategy`` and ``max_temporary_lockouts`` (https://github.com/ansible-collections/community.general/issues/10412, https://github.com/ansible-collections/community.general/pull/10415).
diff --git a/changelogs/fragments/10417-sysrc-refactor.yml b/changelogs/fragments/10417-sysrc-refactor.yml
new file mode 100644
index 0000000000..b1b5db632b
--- /dev/null
+++ b/changelogs/fragments/10417-sysrc-refactor.yml
@@ -0,0 +1,4 @@
+minor_changes:
+ - sysrc - adjustments to the code (https://github.com/ansible-collections/community.general/pull/10417).
+bugfixes:
+ - sysrc - fixes parsing with multi-line variables (https://github.com/ansible-collections/community.general/issues/10394, https://github.com/ansible-collections/community.general/pull/10417).
diff --git a/changelogs/fragments/10422-tasks_only-result_format.yml b/changelogs/fragments/10422-tasks_only-result_format.yml
new file mode 100644
index 0000000000..13e5e749bf
--- /dev/null
+++ b/changelogs/fragments/10422-tasks_only-result_format.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - "tasks_only callback plugin - add ``result_format`` and ``pretty_results`` options similarly to the default callback (https://github.com/ansible-collections/community.general/pull/10422)."
diff --git a/changelogs/fragments/10423-apache_module-condition.yml b/changelogs/fragments/10423-apache_module-condition.yml
new file mode 100644
index 0000000000..9a30d06b4e
--- /dev/null
+++ b/changelogs/fragments/10423-apache_module-condition.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - apache2_module - check the ``cgi`` module restrictions only during activation (https://github.com/ansible-collections/community.general/pull/10423).
diff --git a/changelogs/fragments/10424-scaleway-update-zones.yml b/changelogs/fragments/10424-scaleway-update-zones.yml
new file mode 100644
index 0000000000..ffa508cd3a
--- /dev/null
+++ b/changelogs/fragments/10424-scaleway-update-zones.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - scaleway_* modules, scaleway inventory plugin - update available zones and API URLs (https://github.com/ansible-collections/community.general/issues/10383, https://github.com/ansible-collections/community.general/pull/10424).
\ No newline at end of file
diff --git a/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml b/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml
new file mode 100644
index 0000000000..84b6ecf471
--- /dev/null
+++ b/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+ - cpanm - deprecate ``mode=compatibility``, ``mode=new`` should be used instead (https://github.com/ansible-collections/community.general/pull/10434).
diff --git a/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml b/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml
new file mode 100644
index 0000000000..cccb3a4c5f
--- /dev/null
+++ b/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+ - github_repo - deprecate ``force_defaults=true`` (https://github.com/ansible-collections/community.general/pull/10435).
diff --git a/changelogs/fragments/10442-apk-fix-empty-names.yml b/changelogs/fragments/10442-apk-fix-empty-names.yml
new file mode 100644
index 0000000000..24d68b52df
--- /dev/null
+++ b/changelogs/fragments/10442-apk-fix-empty-names.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - apk - handle empty name strings properly
+ (https://github.com/ansible-collections/community.general/issues/10441, https://github.com/ansible-collections/community.general/pull/10442).
\ No newline at end of file
diff --git a/changelogs/fragments/10445-cronvar-reject-empty-values.yml b/changelogs/fragments/10445-cronvar-reject-empty-values.yml
new file mode 100644
index 0000000000..1bf39619cc
--- /dev/null
+++ b/changelogs/fragments/10445-cronvar-reject-empty-values.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "cronvar - handle empty strings on ``value`` properly (https://github.com/ansible-collections/community.general/issues/10439, https://github.com/ansible-collections/community.general/pull/10445)."
diff --git a/changelogs/fragments/10455-capabilities-improve-error-detection.yml b/changelogs/fragments/10455-capabilities-improve-error-detection.yml
new file mode 100644
index 0000000000..40337a424b
--- /dev/null
+++ b/changelogs/fragments/10455-capabilities-improve-error-detection.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - capabilities - using invalid path (symlink/directory/...) returned unrelated and incoherent error messages (https://github.com/ansible-collections/community.general/issues/5649, https://github.com/ansible-collections/community.general/pull/10455).
\ No newline at end of file
diff --git a/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml
new file mode 100644
index 0000000000..70af0932b3
--- /dev/null
+++ b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "listen_port_facts - avoid crash when required commands are missing (https://github.com/ansible-collections/community.general/issues/10457, https://github.com/ansible-collections/community.general/pull/10458)."
\ No newline at end of file
diff --git a/changelogs/fragments/10459-deprecations.yml b/changelogs/fragments/10459-deprecations.yml
new file mode 100644
index 0000000000..4b3f317454
--- /dev/null
+++ b/changelogs/fragments/10459-deprecations.yml
@@ -0,0 +1,6 @@
+bugfixes:
+ - "apache2_module - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "htpasswd - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "syspatch - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "sysupgrade - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "zypper_repository - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
diff --git a/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml
new file mode 100644
index 0000000000..c4b77299f5
--- /dev/null
+++ b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "cronvar - fix crash on missing ``cron_file`` parent directories (https://github.com/ansible-collections/community.general/issues/10460, https://github.com/ansible-collections/community.general/pull/10461)."
diff --git a/changelogs/fragments/10483-sensu-subscription-quotes.yml b/changelogs/fragments/10483-sensu-subscription-quotes.yml
new file mode 100644
index 0000000000..355099684c
--- /dev/null
+++ b/changelogs/fragments/10483-sensu-subscription-quotes.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sensu_subscription - normalize quotes in the module output (https://github.com/ansible-collections/community.general/pull/10483).
diff --git a/changelogs/fragments/10490-rocketchat.yml b/changelogs/fragments/10490-rocketchat.yml
new file mode 100644
index 0000000000..73657ba67c
--- /dev/null
+++ b/changelogs/fragments/10490-rocketchat.yml
@@ -0,0 +1,3 @@
+deprecated_features:
+ - "rocketchat - the default value for ``is_pre740``, currently ``true``, is deprecated and will change to ``false`` in community.general 13.0.0
+ (https://github.com/ansible-collections/community.general/pull/10490)."
diff --git a/changelogs/fragments/10491-irc.yml b/changelogs/fragments/10491-irc.yml
new file mode 100644
index 0000000000..74867e71a7
--- /dev/null
+++ b/changelogs/fragments/10491-irc.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "irc - pass hostname to ``wrap_socket()`` if ``use_tls=true`` and ``validate_certs=true`` (https://github.com/ansible-collections/community.general/issues/10472, https://github.com/ansible-collections/community.general/pull/10491)."
diff --git a/changelogs/fragments/10493-nagios-services.yml b/changelogs/fragments/10493-nagios-services.yml
new file mode 100644
index 0000000000..3a04556c68
--- /dev/null
+++ b/changelogs/fragments/10493-nagios-services.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - nagios - make parameter ``services`` a ``list`` instead of a ``str`` (https://github.com/ansible-collections/community.general/pull/10493).
diff --git a/changelogs/fragments/10494-rfdn-1.yml b/changelogs/fragments/10494-rfdn-1.yml
new file mode 100644
index 0000000000..09a0c442b0
--- /dev/null
+++ b/changelogs/fragments/10494-rfdn-1.yml
@@ -0,0 +1,27 @@
+minor_changes:
+ - aerospike_migrations - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - airbrake_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bigpanda - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bootc_manage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bower - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - btrfs_subvolume - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bundler - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - campfire - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - cargo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - catapult - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - cisco_webex - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - consul_kv - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - consul_policy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - copr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - datadog_downtime - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - datadog_monitor - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dconf - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dimensiondata_network - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dimensiondata_vlan - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dnf_config_manager - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dnsmadeeasy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dpkg_divert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - easy_install - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - elasticsearch_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - facter - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - filesystem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
diff --git a/changelogs/fragments/10505-rfdn-2.yml b/changelogs/fragments/10505-rfdn-2.yml
new file mode 100644
index 0000000000..89aeab9356
--- /dev/null
+++ b/changelogs/fragments/10505-rfdn-2.yml
@@ -0,0 +1,39 @@
+minor_changes:
+ - gem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - git_config_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_deploy_key - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_repo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_webhook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_webhook_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_branch - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_group_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_group_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_hook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_instance_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_label - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_merge_request - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_milestone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_project - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_project_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_project_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - grove - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - hg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - homebrew - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - homebrew_cask - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - homebrew_tap - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - honeybadger_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - htpasswd - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - icinga2_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - influxdb_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ini_file - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipa_dnsrecord - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipa_dnszone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipa_service - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipbase_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipwcli_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - irc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jabber - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jenkins_credential - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jenkins_job - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jenkins_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
diff --git a/changelogs/fragments/10507-rfdn-3.yml b/changelogs/fragments/10507-rfdn-3.yml
new file mode 100644
index 0000000000..fae9d118bc
--- /dev/null
+++ b/changelogs/fragments/10507-rfdn-3.yml
@@ -0,0 +1,35 @@
+minor_changes:
+ - keycloak_authz_authorization_scope - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keycloak_authz_permission - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keycloak_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keycloak_userprofile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keyring - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - kibana_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - layman - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - ldap_attrs - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - ldap_inc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - librato_annotation - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - lldp - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - logentries - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - lxca_cmms - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - lxca_nodes - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - macports - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mail - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_alerts - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_policies - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_policies_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_tags - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_tenant - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - matrix - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mattermost - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - maven_artifact - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - memset_dns_reload - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - memset_zone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - memset_zone_record - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mqtt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mssql_db - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mssql_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - netcup_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - newrelic_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - nsupdate - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
diff --git a/changelogs/fragments/10512-rfdn-4.yml b/changelogs/fragments/10512-rfdn-4.yml
new file mode 100644
index 0000000000..6d8f9e7d77
--- /dev/null
+++ b/changelogs/fragments/10512-rfdn-4.yml
@@ -0,0 +1,42 @@
+minor_changes:
+ - oci_vcn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - one_image_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - one_template - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - one_vnet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - onepassword_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - oneview_fc_network_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - opendj_backendprop - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - ovh_monthly_billing - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pagerduty - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pagerduty_change - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pagerduty_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pam_limits - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pear - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pkgng - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pnpm - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - portage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_org - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_org_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_user_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pubnub_blocks - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pushbullet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pushover - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - redis_data - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - redis_data_incr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - riak - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - rocketchat - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - rollbar_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - say - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - scaleway_database_backup - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sendgrid - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sensu_silence - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sorcery - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - ssh_config - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - statusio_maintenance - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - svr4pkg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - swdepot - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - syslogger - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sysrc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - systemd_creds_decrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - systemd_creds_encrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
diff --git a/changelogs/fragments/10513-rfdn-5.yml b/changelogs/fragments/10513-rfdn-5.yml
new file mode 100644
index 0000000000..d930d7345c
--- /dev/null
+++ b/changelogs/fragments/10513-rfdn-5.yml
@@ -0,0 +1,18 @@
+minor_changes:
+ - taiga_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - twilio - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_aaa_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_ca_host_key_cert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_dns_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_network_interface_address - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_auth_profile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_exception - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_frontend - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_location - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - vertica_configuration - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - vertica_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - vertica_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - xbps - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - yarn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - zypper - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - zypper_repository - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
diff --git a/changelogs/fragments/10514-deprecate-bearychat.yml b/changelogs/fragments/10514-deprecate-bearychat.yml
new file mode 100644
index 0000000000..202210ac8c
--- /dev/null
+++ b/changelogs/fragments/10514-deprecate-bearychat.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+ - bearychat - module is deprecated and will be removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/issues/10514).
diff --git a/changelogs/fragments/10520-arg-runcommand-list.yml b/changelogs/fragments/10520-arg-runcommand-list.yml
new file mode 100644
index 0000000000..4479b3a694
--- /dev/null
+++ b/changelogs/fragments/10520-arg-runcommand-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - apk - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/issues/10479, https://github.com/ansible-collections/community.general/pull/10520).
diff --git a/changelogs/fragments/10523-bzr-cmd-list.yml b/changelogs/fragments/10523-bzr-cmd-list.yml
new file mode 100644
index 0000000000..fb6c8a6c47
--- /dev/null
+++ b/changelogs/fragments/10523-bzr-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - bzr - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10523).
diff --git a/changelogs/fragments/10524-capabilities-cmd-list.yml b/changelogs/fragments/10524-capabilities-cmd-list.yml
new file mode 100644
index 0000000000..e6af832b5c
--- /dev/null
+++ b/changelogs/fragments/10524-capabilities-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - capabilities - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10524).
diff --git a/changelogs/fragments/10525-composer-cmd-list.yml b/changelogs/fragments/10525-composer-cmd-list.yml
new file mode 100644
index 0000000000..a2aebc8a6d
--- /dev/null
+++ b/changelogs/fragments/10525-composer-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - composer - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10525).
diff --git a/changelogs/fragments/10526-easy-install-cmd-list.yml b/changelogs/fragments/10526-easy-install-cmd-list.yml
new file mode 100644
index 0000000000..6fa6717adc
--- /dev/null
+++ b/changelogs/fragments/10526-easy-install-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - easy_install - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10526).
diff --git a/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml b/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml
new file mode 100644
index 0000000000..cc2ae7efa0
--- /dev/null
+++ b/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - keycloak_identity_provider - add support for ``fromUrl`` to automatically fetch OIDC endpoints from the well-known discovery URL, simplifying identity provider configuration (https://github.com/ansible-collections/community.general/pull/10527).
diff --git a/changelogs/fragments/10531-wsl-paramiko.yml b/changelogs/fragments/10531-wsl-paramiko.yml
new file mode 100644
index 0000000000..08257d6c78
--- /dev/null
+++ b/changelogs/fragments/10531-wsl-paramiko.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - "wsl connection plugin - avoid deprecated ansible-core paramiko import helper, import paramiko directly instead
+ (https://github.com/ansible-collections/community.general/issues/10515, https://github.com/ansible-collections/community.general/pull/10531)."
diff --git a/changelogs/fragments/10532-apk.yml b/changelogs/fragments/10532-apk.yml
new file mode 100644
index 0000000000..84c5d985e8
--- /dev/null
+++ b/changelogs/fragments/10532-apk.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "apk - fix check for empty/whitespace-only package names (https://github.com/ansible-collections/community.general/pull/10532)."
diff --git a/changelogs/fragments/10536-imgadm-cmd-list.yml b/changelogs/fragments/10536-imgadm-cmd-list.yml
new file mode 100644
index 0000000000..0f22c774d8
--- /dev/null
+++ b/changelogs/fragments/10536-imgadm-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - imgadm - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10536).
diff --git a/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml b/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml
new file mode 100644
index 0000000000..66333b01a8
--- /dev/null
+++ b/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - keycloak_realm - add support for client-related options and OAuth2 device (https://github.com/ansible-collections/community.general/pull/10538).
diff --git a/changelogs/fragments/10539-json_query.yml b/changelogs/fragments/10539-json_query.yml
new file mode 100644
index 0000000000..7e84b7ecb0
--- /dev/null
+++ b/changelogs/fragments/10539-json_query.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "json_query filter plugin - make compatible with lazy evaluation list and dictionary types of ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10539)."
diff --git a/changelogs/fragments/10566-merge_variables.yml b/changelogs/fragments/10566-merge_variables.yml
new file mode 100644
index 0000000000..c0de6dd845
--- /dev/null
+++ b/changelogs/fragments/10566-merge_variables.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "merge_variables lookup plugin - avoid deprecated functionality from ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10566)."
diff --git a/changelogs/fragments/10573-logstash-plugin-cmd-list.yml b/changelogs/fragments/10573-logstash-plugin-cmd-list.yml
new file mode 100644
index 0000000000..441c1c49a3
--- /dev/null
+++ b/changelogs/fragments/10573-logstash-plugin-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - logstash_plugin - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/issues/10479, https://github.com/ansible-collections/community.general/pull/10573).
diff --git a/changelogs/fragments/10574-django-runner.yml b/changelogs/fragments/10574-django-runner.yml
new file mode 100644
index 0000000000..a0bf6ec6d4
--- /dev/null
+++ b/changelogs/fragments/10574-django-runner.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - django module utils - remove deprecated parameter in ``_DjangoRunner`` call (https://github.com/ansible-collections/community.general/pull/10574).
diff --git a/changelogs/fragments/10599-open-iscsi-cmd-list.yml b/changelogs/fragments/10599-open-iscsi-cmd-list.yml
new file mode 100644
index 0000000000..f8ef659ee9
--- /dev/null
+++ b/changelogs/fragments/10599-open-iscsi-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - open_iscsi - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10599).
diff --git a/changelogs/fragments/10601-pear-cmd-list.yml b/changelogs/fragments/10601-pear-cmd-list.yml
new file mode 100644
index 0000000000..d5ab2d3d0e
--- /dev/null
+++ b/changelogs/fragments/10601-pear-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - pear - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10601).
diff --git a/changelogs/fragments/10602-portage-cmd-list.yml b/changelogs/fragments/10602-portage-cmd-list.yml
new file mode 100644
index 0000000000..36b6711e00
--- /dev/null
+++ b/changelogs/fragments/10602-portage-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - portage - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10602).
diff --git a/changelogs/fragments/10603-riak-cmd-list.yml b/changelogs/fragments/10603-riak-cmd-list.yml
new file mode 100644
index 0000000000..1a29a07c7f
--- /dev/null
+++ b/changelogs/fragments/10603-riak-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - riak - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10603).
diff --git a/changelogs/fragments/10604-solaris-zone-cmd-list.yml b/changelogs/fragments/10604-solaris-zone-cmd-list.yml
new file mode 100644
index 0000000000..2fe52cbf31
--- /dev/null
+++ b/changelogs/fragments/10604-solaris-zone-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - solaris_zone - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10604).
diff --git a/changelogs/fragments/10605-swupd-cmd-list.yml b/changelogs/fragments/10605-swupd-cmd-list.yml
new file mode 100644
index 0000000000..23669d7974
--- /dev/null
+++ b/changelogs/fragments/10605-swupd-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - swupd - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10605).
diff --git a/changelogs/fragments/10606-urpmi-cmd-list.yml b/changelogs/fragments/10606-urpmi-cmd-list.yml
new file mode 100644
index 0000000000..a7a2e54a1e
--- /dev/null
+++ b/changelogs/fragments/10606-urpmi-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - urpmi - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10606).
diff --git a/changelogs/fragments/10608-xbps-cmd-list.yml b/changelogs/fragments/10608-xbps-cmd-list.yml
new file mode 100644
index 0000000000..ff951a4520
--- /dev/null
+++ b/changelogs/fragments/10608-xbps-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - xbps - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10608).
diff --git a/changelogs/fragments/10609-xfs-quota-cmd-list.yml b/changelogs/fragments/10609-xfs-quota-cmd-list.yml
new file mode 100644
index 0000000000..74e170ef09
--- /dev/null
+++ b/changelogs/fragments/10609-xfs-quota-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - xfs_quota - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10609).
diff --git a/changelogs/fragments/10612-timezone-cmd-list.yml b/changelogs/fragments/10612-timezone-cmd-list.yml
new file mode 100644
index 0000000000..601375fbc5
--- /dev/null
+++ b/changelogs/fragments/10612-timezone-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - timezone - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10612).
diff --git a/changelogs/fragments/10642-parted-cmd-list.yml b/changelogs/fragments/10642-parted-cmd-list.yml
new file mode 100644
index 0000000000..29025512dd
--- /dev/null
+++ b/changelogs/fragments/10642-parted-cmd-list.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - parted - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10642).
diff --git a/changelogs/fragments/10644-oneview-os.yml b/changelogs/fragments/10644-oneview-os.yml
new file mode 100644
index 0000000000..f2789cf5fc
--- /dev/null
+++ b/changelogs/fragments/10644-oneview-os.yml
@@ -0,0 +1,2 @@
+breaking_changes:
+ - oneview module utils - remove import of standard library ``os`` (https://github.com/ansible-collections/community.general/pull/10644).
diff --git a/changelogs/fragments/10646-scaleway_container_cpu_limit.yml b/changelogs/fragments/10646-scaleway_container_cpu_limit.yml
new file mode 100644
index 0000000000..f23a1bb96d
--- /dev/null
+++ b/changelogs/fragments/10646-scaleway_container_cpu_limit.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - scaleway_container - add a ``cpu_limit`` argument (https://github.com/ansible-collections/community.general/pull/10646).
diff --git a/changelogs/fragments/10647-scaleway-module-defaults.yml b/changelogs/fragments/10647-scaleway-module-defaults.yml
new file mode 100644
index 0000000000..7fca7a171a
--- /dev/null
+++ b/changelogs/fragments/10647-scaleway-module-defaults.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - scaleway modules - add a ``scaleway`` group to use ``module_defaults`` (https://github.com/ansible-collections/community.general/pull/10647).
diff --git a/changelogs/fragments/10652-oracle-deprecation.yml b/changelogs/fragments/10652-oracle-deprecation.yml
new file mode 100644
index 0000000000..3842e994f8
--- /dev/null
+++ b/changelogs/fragments/10652-oracle-deprecation.yml
@@ -0,0 +1,4 @@
+deprecated_features:
+ - oci_utils module utils - utils is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652).
+ - oci_vcn - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652).
+ - oracle* doc fragments - fragments are deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652).
diff --git a/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml b/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml
new file mode 100644
index 0000000000..333121902f
--- /dev/null
+++ b/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - zypper - support the ``--gpg-auto-import-keys`` option in zypper (https://github.com/ansible-collections/community.general/issues/10660, https://github.com/ansible-collections/community.general/pull/10661).
diff --git a/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml b/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml
new file mode 100644
index 0000000000..270488d248
--- /dev/null
+++ b/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "pacemaker_resource - fix ``resource_type`` parameter formatting (https://github.com/ansible-collections/community.general/issues/10426, https://github.com/ansible-collections/community.general/pull/10663)."
diff --git a/changelogs/fragments/10665-pacemaker-resource-clone.yml b/changelogs/fragments/10665-pacemaker-resource-clone.yml
new file mode 100644
index 0000000000..c24420c598
--- /dev/null
+++ b/changelogs/fragments/10665-pacemaker-resource-clone.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - pacemaker_resource - add ``state=cloned`` for cloning pacemaker resources or groups (https://github.com/ansible-collections/community.general/issues/10322, https://github.com/ansible-collections/community.general/pull/10665).
diff --git a/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml b/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml
new file mode 100644
index 0000000000..65aeae2a86
--- /dev/null
+++ b/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - gitlab_group_access_token - add ``planner`` access level (https://github.com/ansible-collections/community.general/pull/10679).
+ - gitlab_project_access_token - add ``planner`` access level (https://github.com/ansible-collections/community.general/pull/10679).
diff --git a/changelogs/fragments/10684-django-improvements.yml b/changelogs/fragments/10684-django-improvements.yml
new file mode 100644
index 0000000000..a8ca1cfbe9
--- /dev/null
+++ b/changelogs/fragments/10684-django-improvements.yml
@@ -0,0 +1,4 @@
+minor_changes:
+ - django module utils - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684).
+ - django_check - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684).
+ - django_createcachetable - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684).
diff --git a/changelogs/fragments/10687-deprecations.yml b/changelogs/fragments/10687-deprecations.yml
new file mode 100644
index 0000000000..62974ab6a0
--- /dev/null
+++ b/changelogs/fragments/10687-deprecations.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "Avoid deprecated functionality in ansible-core 2.20 (https://github.com/ansible-collections/community.general/pull/10687)."
diff --git a/changelogs/fragments/10688-pids.yml b/changelogs/fragments/10688-pids.yml
new file mode 100644
index 0000000000..1ed97a6fed
--- /dev/null
+++ b/changelogs/fragments/10688-pids.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "pids - prevent error when an empty string is provided for ``name`` (https://github.com/ansible-collections/community.general/issues/10672, https://github.com/ansible-collections/community.general/pull/10688)."
diff --git a/changelogs/fragments/10689-gem-prevent-soundness-issue.yml b/changelogs/fragments/10689-gem-prevent-soundness-issue.yml
new file mode 100644
index 0000000000..a55dba1ea1
--- /dev/null
+++ b/changelogs/fragments/10689-gem-prevent-soundness-issue.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "gem - fix soundness issue when uninstalling default gems on Ubuntu (https://github.com/ansible-collections/community.general/issues/10451, https://github.com/ansible-collections/community.general/pull/10689)."
diff --git a/changelogs/fragments/10700-django-check-databases.yml b/changelogs/fragments/10700-django-check-databases.yml
new file mode 100644
index 0000000000..cfb8897f6a
--- /dev/null
+++ b/changelogs/fragments/10700-django-check-databases.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - django_check - rename parameter ``database`` to ``databases``, add alias for compatibility (https://github.com/ansible-collections/community.general/pull/10700).
diff --git a/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml b/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml
new file mode 100644
index 0000000000..2ceb1352b4
--- /dev/null
+++ b/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - openbsd_pkg - add ``autoremove`` parameter to remove unused dependencies (https://github.com/ansible-collections/community.general/pull/10705).
diff --git a/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml b/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml
new file mode 100644
index 0000000000..ba5e08edd3
--- /dev/null
+++ b/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "pacemaker - use regex for matching ``maintenance-mode`` output to determine cluster maintenance status (https://github.com/ansible-collections/community.general/issues/10426, https://github.com/ansible-collections/community.general/pull/10707)."
diff --git a/changelogs/fragments/10711-pytohn-idioms-1.yml b/changelogs/fragments/10711-pytohn-idioms-1.yml
new file mode 100644
index 0000000000..18ae9db37b
--- /dev/null
+++ b/changelogs/fragments/10711-pytohn-idioms-1.yml
@@ -0,0 +1,6 @@
+minor_changes:
+ - gitlab_label - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711).
+ - gitlab_milestone - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711).
+ - ipa_host - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711).
+ - lvg_rename - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711).
+ - terraform - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711).
diff --git a/changelogs/fragments/10712-python-idioms-2.yml b/changelogs/fragments/10712-python-idioms-2.yml
new file mode 100644
index 0000000000..8d49f1f86f
--- /dev/null
+++ b/changelogs/fragments/10712-python-idioms-2.yml
@@ -0,0 +1,7 @@
+minor_changes:
+ - iocage inventory plugin - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712).
+ - manageiq - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712).
+ - android_sdk - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712).
+ - elasticsearch_plugin - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712).
+ - manageiq_alert_profiles - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712).
+ - one_vm - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712).
diff --git a/changelogs/fragments/10727-python-idioms-3.yml b/changelogs/fragments/10727-python-idioms-3.yml
new file mode 100644
index 0000000000..9b92b8bbef
--- /dev/null
+++ b/changelogs/fragments/10727-python-idioms-3.yml
@@ -0,0 +1,10 @@
+minor_changes:
+ - filesize - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727).
+ - iptables_state - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727).
+ - manageiq_group - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727).
+ - manageiq_tenant - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727).
+ - mssql_db - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727).
+ - openbsd_pkg - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727).
+ - ufw - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727).
+ - xenserver_facts - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727).
+ - zfs_facts - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727).
diff --git a/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml b/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml
new file mode 100644
index 0000000000..4bb018a9c7
--- /dev/null
+++ b/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+ - pacemaker_cluster - the state ``cleanup`` will be removed from community.general 14.0.0 (https://github.com/ansible-collections/community.general/pull/10741).
diff --git a/changelogs/fragments/10743-monit-handle-unknown-status.yml b/changelogs/fragments/10743-monit-handle-unknown-status.yml
new file mode 100644
index 0000000000..1c9fbb1101
--- /dev/null
+++ b/changelogs/fragments/10743-monit-handle-unknown-status.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - monit - fix crash caused by an unknown status value returned from the monit service (https://github.com/ansible-collections/community.general/issues/10742, https://github.com/ansible-collections/community.general/pull/10743).
diff --git a/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml b/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml
new file mode 100644
index 0000000000..716ffa35f1
--- /dev/null
+++ b/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - kdeconfig - ``kwriteconfig`` executable could not be discovered automatically on systems with only ``kwriteconfig6`` installed.
+ ``kwriteconfig6`` can now be discovered by Ansible (https://github.com/ansible-collections/community.general/issues/10746, https://github.com/ansible-collections/community.general/pull/10751).
diff --git a/changelogs/fragments/10752-selective-hardcoded-loop-var.yml b/changelogs/fragments/10752-selective-hardcoded-loop-var.yml
new file mode 100644
index 0000000000..cfc6bdd9e9
--- /dev/null
+++ b/changelogs/fragments/10752-selective-hardcoded-loop-var.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - selective callback plugin - specify ``ansible_loop_var`` instead of the explicit value ``item`` when printing task result (https://github.com/ansible-collections/community.general/pull/10752).
diff --git a/changelogs/fragments/10769-xenserver-rf.yml b/changelogs/fragments/10769-xenserver-rf.yml
new file mode 100644
index 0000000000..2c31edf886
--- /dev/null
+++ b/changelogs/fragments/10769-xenserver-rf.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - xenserver module utils - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10769).
diff --git a/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml b/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml
new file mode 100644
index 0000000000..a38d98a444
--- /dev/null
+++ b/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - gitlab_group_access_token - add missing scopes (https://github.com/ansible-collections/community.general/pull/10785).
+ - gitlab_project_access_token - add missing scopes (https://github.com/ansible-collections/community.general/pull/10785).
diff --git a/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml b/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml
new file mode 100644
index 0000000000..bbf5b6d9a5
--- /dev/null
+++ b/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - gitlab_group_variable - support masked-and-hidden variables (https://github.com/ansible-collections/community.general/pull/10787).
+ - gitlab_project_variable - support masked-and-hidden variables (https://github.com/ansible-collections/community.general/pull/10787).
diff --git a/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml b/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml
new file mode 100644
index 0000000000..ed4d4d78e8
--- /dev/null
+++ b/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml
@@ -0,0 +1,3 @@
+minor_changes:
+ - gitlab_protected_branch - add ``allow_force_push``, ``code_owner_approval_required`` (https://github.com/ansible-collections/community.general/pull/10795, https://github.com/ansible-collections/community.general/issues/6432, https://github.com/ansible-collections/community.general/issues/10289, https://github.com/ansible-collections/community.general/issues/10765).
+ - gitlab_protected_branch - update protected branches if possible instead of recreating them (https://github.com/ansible-collections/community.general/pull/10795).
diff --git a/changelogs/fragments/10796-rocketchat-force-content-type.yml b/changelogs/fragments/10796-rocketchat-force-content-type.yml
new file mode 100644
index 0000000000..96ca116e62
--- /dev/null
+++ b/changelogs/fragments/10796-rocketchat-force-content-type.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - rocketchat - fix message delivery in Rocket Chat >= 7.5.3 by forcing ``Content-Type`` header to ``application/json`` instead of the default ``application/x-www-form-urlencoded`` (https://github.com/ansible-collections/community.general/issues/10796, https://github.com/ansible-collections/community.general/pull/10796).
diff --git a/changelogs/fragments/10805-homebrew-support-old-names.yml b/changelogs/fragments/10805-homebrew-support-old-names.yml
new file mode 100644
index 0000000000..43d5a1c8bf
--- /dev/null
+++ b/changelogs/fragments/10805-homebrew-support-old-names.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - homebrew - do not fail when cask or formula name has changed in homebrew repo (https://github.com/ansible-collections/community.general/issues/10804, https://github.com/ansible-collections/community.general/pull/10805).
diff --git a/changelogs/fragments/10810-github_app_access_token-jwt.yml b/changelogs/fragments/10810-github_app_access_token-jwt.yml
new file mode 100644
index 0000000000..804ab9fbaa
--- /dev/null
+++ b/changelogs/fragments/10810-github_app_access_token-jwt.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "github_app_access_token lookup plugin - fix compatibility imports for using jwt (https://github.com/ansible-collections/community.general/issues/10807, https://github.com/ansible-collections/community.general/pull/10810)."
diff --git a/changelogs/fragments/10812-gitlab-variable-add-description.yml b/changelogs/fragments/10812-gitlab-variable-add-description.yml
new file mode 100644
index 0000000000..1de0405aff
--- /dev/null
+++ b/changelogs/fragments/10812-gitlab-variable-add-description.yml
@@ -0,0 +1,4 @@
+minor_changes:
+ - gitlab_group_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812).
+ - gitlab_instance_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812).
+ - gitlab_project_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812, https://github.com/ansible-collections/community.general/issues/8584, https://github.com/ansible-collections/community.general/issues/10809).
diff --git a/changelogs/fragments/10823-parted-fail-json-command.yml b/changelogs/fragments/10823-parted-fail-json-command.yml
new file mode 100644
index 0000000000..8a52be589e
--- /dev/null
+++ b/changelogs/fragments/10823-parted-fail-json-command.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - parted - variable is a list, not text (https://github.com/ansible-collections/community.general/pull/10823, https://github.com/ansible-collections/community.general/issues/10817).
diff --git a/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml b/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml
new file mode 100644
index 0000000000..8fd05ec182
--- /dev/null
+++ b/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - keycloak_role - fixes an issue where the module incorrectly returns ``changed=true`` when using the alias ``clientId`` in composite roles (https://github.com/ansible-collections/community.general/pull/10829).
diff --git a/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml b/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml
new file mode 100644
index 0000000000..3b7818ee3e
--- /dev/null
+++ b/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - keycloak_group - fixes an issue where module ignores realm when searching subgroups by name (https://github.com/ansible-collections/community.general/pull/10840).
diff --git a/changelogs/fragments/10842-keycloak-client-scope-support.yml b/changelogs/fragments/10842-keycloak-client-scope-support.yml
new file mode 100644
index 0000000000..80266fa43b
--- /dev/null
+++ b/changelogs/fragments/10842-keycloak-client-scope-support.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - keycloak_client - add idempotent support for ``default_client_scopes`` and ``optional_client_scopes``, and ensure consistent change detection between check mode and live run (https://github.com/ansible-collections/community.general/issues/5495, https://github.com/ansible-collections/community.general/pull/10842).
diff --git a/changelogs/fragments/10852-yaml.yml b/changelogs/fragments/10852-yaml.yml
new file mode 100644
index 0000000000..1319b94ab5
--- /dev/null
+++ b/changelogs/fragments/10852-yaml.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "yaml cache plugin - make compatible with ansible-core 2.19 (https://github.com/ansible-collections/community.general/issues/10849, https://github.com/ansible-collections/community.general/pull/10852)."
diff --git a/changelogs/fragments/10857-github_deploy_key-err.yml b/changelogs/fragments/10857-github_deploy_key-err.yml
new file mode 100644
index 0000000000..58bac31c5e
--- /dev/null
+++ b/changelogs/fragments/10857-github_deploy_key-err.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "github_deploy_key - fix bug during error handling if no body was present in the result (https://github.com/ansible-collections/community.general/issues/10853, https://github.com/ansible-collections/community.general/pull/10857)."
diff --git a/changelogs/fragments/10873-six.yml b/changelogs/fragments/10873-six.yml
new file mode 100644
index 0000000000..d9ea201520
--- /dev/null
+++ b/changelogs/fragments/10873-six.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "Avoid usage of deprecated ``ansible.module_utils.six`` in all code that does not have to support Python 2 (https://github.com/ansible-collections/community.general/pull/10873)."
diff --git a/changelogs/fragments/10874-pipx-180.yml b/changelogs/fragments/10874-pipx-180.yml
new file mode 100644
index 0000000000..dd776827e8
--- /dev/null
+++ b/changelogs/fragments/10874-pipx-180.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - pipx module_utils - use ``PIPX_USE_EMOJI`` to disable emojis in the output of ``pipx`` 1.8.0 (https://github.com/ansible-collections/community.general/pull/10874).
diff --git a/changelogs/fragments/10880-github_app_access_token-lookup.yml b/changelogs/fragments/10880-github_app_access_token-lookup.yml
new file mode 100644
index 0000000000..b3c9503d59
--- /dev/null
+++ b/changelogs/fragments/10880-github_app_access_token-lookup.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - "github_app_access_token lookup plugin - add support for GitHub Enterprise Server (https://github.com/ansible-collections/community.general/issues/10879, https://github.com/ansible-collections/community.general/pull/10880)."
diff --git a/changelogs/fragments/10888-six.yml b/changelogs/fragments/10888-six.yml
new file mode 100644
index 0000000000..b1f09accb3
--- /dev/null
+++ b/changelogs/fragments/10888-six.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "Remove all usage of ``ansible.module_utils.six`` (https://github.com/ansible-collections/community.general/pull/10888)."
diff --git a/changelogs/fragments/10891-dict-refactor.yml b/changelogs/fragments/10891-dict-refactor.yml
new file mode 100644
index 0000000000..63d5e585ff
--- /dev/null
+++ b/changelogs/fragments/10891-dict-refactor.yml
@@ -0,0 +1,6 @@
+minor_changes:
+ - dependent lookup plugin - refactor dict initialization, no impact on users (https://github.com/ansible-collections/community.general/pull/10891).
+ - scaleway module_utils - improve code readability, no impact on users (https://github.com/ansible-collections/community.general/pull/10891).
+ - pacemaker_cluster - refactor dict initialization, no impact on users (https://github.com/ansible-collections/community.general/pull/10891).
+ - pacemaker_resource - refactor dict initialization, no impact on users (https://github.com/ansible-collections/community.general/pull/10891).
+ - pacemaker_stonith - refactor dict initialization, no impact on users (https://github.com/ansible-collections/community.general/pull/10891).
diff --git a/changelogs/fragments/10892-remove-py2.yml b/changelogs/fragments/10892-remove-py2.yml
new file mode 100644
index 0000000000..69904d4777
--- /dev/null
+++ b/changelogs/fragments/10892-remove-py2.yml
@@ -0,0 +1,7 @@
+minor_changes:
+ - known_hosts module_utils - drop Python 2 support when parsing output of ``urlparse`` (https://github.com/ansible-collections/community.general/pull/10892).
+ - aix_inittab - drop Python 2 support for function ``zip`` (https://github.com/ansible-collections/community.general/pull/10892).
+ - copr - drop support for Python 2 interpreter (https://github.com/ansible-collections/community.general/pull/10892).
+ - dconf - drop support for Python 2 interpreter (https://github.com/ansible-collections/community.general/pull/10892).
+ - irc - drop Python 2 support for SSL context creation (https://github.com/ansible-collections/community.general/pull/10892).
+ - mail - drop Python 2 support for Message-ID domain setting (https://github.com/ansible-collections/community.general/pull/10892).
diff --git a/changelogs/fragments/10899-use-f-strings.yml b/changelogs/fragments/10899-use-f-strings.yml
new file mode 100644
index 0000000000..9752e5ebf2
--- /dev/null
+++ b/changelogs/fragments/10899-use-f-strings.yml
@@ -0,0 +1,14 @@
+minor_changes:
+ - wsl connection plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - accumulate filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - counter filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - crc32 filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - groupby_as_dict filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - hashids filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - json_query filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - lists filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - random_mac filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - time filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - unicode_normalize filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - passwordstore lookup plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
+ - ansible_type plugin_utils plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899).
diff --git a/changelogs/fragments/10903-2to3.yml b/changelogs/fragments/10903-2to3.yml
new file mode 100644
index 0000000000..af0b744456
--- /dev/null
+++ b/changelogs/fragments/10903-2to3.yml
@@ -0,0 +1,8 @@
+minor_changes:
+ - pickle cache plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903).
+ - counter_enabled callback plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903).
+ - wsl connection plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903).
+ - cobbler inventory plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903).
+ - linode inventory plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903).
+ - utm_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903).
+ - vexata module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903).
diff --git a/changelogs/fragments/10904-2to3-mods.yml b/changelogs/fragments/10904-2to3-mods.yml
new file mode 100644
index 0000000000..12ca58b250
--- /dev/null
+++ b/changelogs/fragments/10904-2to3-mods.yml
@@ -0,0 +1,30 @@
+minor_changes:
+ - bitbucket_access_key - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - bitbucket_pipeline_known_host - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - bitbucket_pipeline_variable - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - bzr - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - capabilities - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - gitlab_milestone - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - haproxy - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - homebrew - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - homebrew_cask - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - hwc_network_vpc - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - hwc_smn_topic - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - idrac_redfish_config - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - idrac_redfish_info - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - influxdb_retention_policy - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - ini_file - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - interfaces_file - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - launchd - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - logentries - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - packet_sshkey - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - pamd - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - taiga_issue - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - vdo - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - vertica_role - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - vertica_schema - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - vertica_user - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - vexata_eg - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - vexata_volume - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - xcc_redfish_command - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
+ - zypper - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904).
diff --git a/changelogs/fragments/10905-java-keystore-simplify.yml b/changelogs/fragments/10905-java-keystore-simplify.yml
new file mode 100644
index 0000000000..7b2a0de53a
--- /dev/null
+++ b/changelogs/fragments/10905-java-keystore-simplify.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - java_keystore - remove redundant function (https://github.com/ansible-collections/community.general/pull/10905).
diff --git a/changelogs/fragments/10906-linode-modutils.yml b/changelogs/fragments/10906-linode-modutils.yml
new file mode 100644
index 0000000000..ced88a7474
--- /dev/null
+++ b/changelogs/fragments/10906-linode-modutils.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - linode module utils - remove redundant code for ancient versions of Ansible (https://github.com/ansible-collections/community.general/pull/10906).
diff --git a/changelogs/fragments/10907-2to3-mu.yml b/changelogs/fragments/10907-2to3-mu.yml
new file mode 100644
index 0000000000..af19593cf0
--- /dev/null
+++ b/changelogs/fragments/10907-2to3-mu.yml
@@ -0,0 +1,9 @@
+minor_changes:
+ - csv module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907).
+ - gitlab module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907).
+ - homebrew module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907).
+ - ilo_redfish_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907).
+ - redfish_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907).
+ - saslprep module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907).
+ - utm_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907).
+ - vexata module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907).
diff --git a/changelogs/fragments/10908-archive-lzma.yml b/changelogs/fragments/10908-archive-lzma.yml
new file mode 100644
index 0000000000..bcce681bed
--- /dev/null
+++ b/changelogs/fragments/10908-archive-lzma.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - archive - remove conditional code for older Python versions (https://github.com/ansible-collections/community.general/pull/10908).
diff --git a/changelogs/fragments/10909-launchd-plistlib.yml b/changelogs/fragments/10909-launchd-plistlib.yml
new file mode 100644
index 0000000000..fc798c9ddb
--- /dev/null
+++ b/changelogs/fragments/10909-launchd-plistlib.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - launchd - remove conditional code supporting Python versions prior to 3.4 (https://github.com/ansible-collections/community.general/pull/10909).
diff --git a/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml b/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml
new file mode 100644
index 0000000000..214487938b
--- /dev/null
+++ b/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - gitlab_runner - fix exception in check mode when a new runner is created (https://github.com/ansible-collections/community.general/issues/8854).
diff --git a/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml b/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml
new file mode 100644
index 0000000000..df70186ff5
--- /dev/null
+++ b/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - keycloak_clientsecret, keycloak_clientsecret_info - make ``client_auth`` work (https://github.com/ansible-collections/community.general/issues/10932, https://github.com/ansible-collections/community.general/pull/10933).
\ No newline at end of file
diff --git a/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml b/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml
new file mode 100644
index 0000000000..eb2b06d2f1
--- /dev/null
+++ b/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - cloudflare_dns - roll back changes to SRV record validation (https://github.com/ansible-collections/community.general/issues/10934, https://github.com/ansible-collections/community.general/pull/10937).
diff --git a/changelogs/fragments/10940-use-f-strings-xenserver.yml b/changelogs/fragments/10940-use-f-strings-xenserver.yml
new file mode 100644
index 0000000000..114ac46486
--- /dev/null
+++ b/changelogs/fragments/10940-use-f-strings-xenserver.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - xenserver module utils plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10940).
diff --git a/changelogs/fragments/9499-typetalk-deprecation.yml b/changelogs/fragments/9499-typetalk-deprecation.yml
new file mode 100644
index 0000000000..8323bbe959
--- /dev/null
+++ b/changelogs/fragments/9499-typetalk-deprecation.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+ - typetalk - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9499).
diff --git a/changelogs/fragments/ansible-core-2.16.yml b/changelogs/fragments/ansible-core-2.16.yml
new file mode 100644
index 0000000000..1132d20e3e
--- /dev/null
+++ b/changelogs/fragments/ansible-core-2.16.yml
@@ -0,0 +1,2 @@
+removed_features:
+ - "Ansible-core 2.16 is no longer supported. This also means that the collection now requires Python 3.7+ (https://github.com/ansible-collections/community.general/pull/10884)."
diff --git a/changelogs/fragments/become-pipelining.yml b/changelogs/fragments/become-pipelining.yml
new file mode 100644
index 0000000000..201d85f71c
--- /dev/null
+++ b/changelogs/fragments/become-pipelining.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - "doas become plugin - disable pipelining on ansible-core 2.19+. The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/issues/9977, https://github.com/ansible-collections/community.general/pull/10537)."
+ - "machinectl become plugin - disable pipelining on ansible-core 2.19+. The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/pull/10537)."
diff --git a/changelogs/fragments/deprecations.yml b/changelogs/fragments/deprecations.yml
new file mode 100644
index 0000000000..424b2d439b
--- /dev/null
+++ b/changelogs/fragments/deprecations.yml
@@ -0,0 +1,16 @@
+removed_features:
+ - "yaml callback plugin - the deprecated plugin has been removed. Use the default callback with ``result_format=yaml`` instead (https://github.com/ansible-collections/community.general/pull/10883)."
+ - "purestorage doc fragment - the modules using this doc fragment have been removed from community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/10883)."
+ - "pure module utils - the modules using this module utils have been removed from community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/10883)."
+ - "bearychat - the module has been removed as the chat service is no longer available (https://github.com/ansible-collections/community.general/pull/10883)."
+ - "facter - the module has been replaced by ``community.general.facter_facts`` (https://github.com/ansible-collections/community.general/pull/10883)."
+ - "pacemaker_cluster - the option ``state`` is now required (https://github.com/ansible-collections/community.general/pull/10883)."
+ - >-
+ opkg - the value ``""`` for the option ``force`` is no longer allowed. Omit ``force`` instead (https://github.com/ansible-collections/community.general/pull/10883).
+ - "cmd_runner_fmt module utils - the parameter ``ctx_ignore_none`` to argument formatters has been removed (https://github.com/ansible-collections/community.general/pull/10883)."
+ - "cmd_runner module utils - the parameter ``ignore_value_none`` to ``CmdRunner.__call__()`` has been removed (https://github.com/ansible-collections/community.general/pull/10883)."
+ - >-
+ mh.deco module utils - the parameters ``on_success`` and ``on_failure`` of ``cause()`` have been removed; use ``when="success"`` and ``when="failure"`` instead (https://github.com/ansible-collections/community.general/pull/10883).
+breaking_changes:
+ - "slack - the default of ``prepend_hash`` changed from ``auto`` to ``never`` (https://github.com/ansible-collections/community.general/pull/10883)."
+ - "mh.base module utils - ``debug`` will now always be delegated to the underlying ``AnsibleModule`` object (https://github.com/ansible-collections/community.general/pull/10883)."
diff --git a/changelogs/fragments/hiera.yml b/changelogs/fragments/hiera.yml
new file mode 100644
index 0000000000..70c75f059e
--- /dev/null
+++ b/changelogs/fragments/hiera.yml
@@ -0,0 +1,4 @@
+deprecated_features:
+ - "hiera lookup plugin - retrieving data with Hiera has been deprecated a long time ago; because of that this plugin will be removed from community.general 13.0.0.
+ If you disagree with this deprecation, please create an issue in the community.general repository
+ (https://github.com/ansible-collections/community.general/issues/4462, https://github.com/ansible-collections/community.general/pull/10779)."
diff --git a/changelogs/fragments/keycloak-realm-webauthn-policies.yml b/changelogs/fragments/keycloak-realm-webauthn-policies.yml
new file mode 100644
index 0000000000..91b1f67b3a
--- /dev/null
+++ b/changelogs/fragments/keycloak-realm-webauthn-policies.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - keycloak_realm - add support for WebAuthn policy configuration options, including both regular and passwordless WebAuthn policies (https://github.com/ansible-collections/community.general/pull/10791).
diff --git a/changelogs/fragments/logstash.yml b/changelogs/fragments/logstash.yml
new file mode 100644
index 0000000000..1c7ec89b7d
--- /dev/null
+++ b/changelogs/fragments/logstash.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - logstash callback plugin - remove reference to Python 2 library (https://github.com/ansible-collections/community.general/pull/10345).
diff --git a/changelogs/fragments/lvm_pv.yml b/changelogs/fragments/lvm_pv.yml
new file mode 100644
index 0000000000..d0198d7ffb
--- /dev/null
+++ b/changelogs/fragments/lvm_pv.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - lvm_pv - properly detect SCSI or NVMe devices to rescan (https://github.com/ansible-collections/community.general/issues/10444, https://github.com/ansible-collections/community.general/pull/10596).
diff --git a/changelogs/fragments/random_string_seed.yml b/changelogs/fragments/random_string_seed.yml
new file mode 100644
index 0000000000..a90b7d93b5
--- /dev/null
+++ b/changelogs/fragments/random_string_seed.yml
@@ -0,0 +1,3 @@
+---
+minor_changes:
+ - random_string lookup plugin - allow specifying a seed when generating random strings (https://github.com/ansible-collections/community.general/issues/5362, https://github.com/ansible-collections/community.general/pull/10710).
diff --git a/changelogs/fragments/replace-random-with-secrets.yml b/changelogs/fragments/replace-random-with-secrets.yml
new file mode 100644
index 0000000000..b82e59e7e9
--- /dev/null
+++ b/changelogs/fragments/replace-random-with-secrets.yml
@@ -0,0 +1,4 @@
+bugfixes:
+ - random_string lookup plugin - replace ``random.SystemRandom()`` with ``secrets.SystemRandom()`` when
+ generating strings. This has no practical effect, as both are the same
+ (https://github.com/ansible-collections/community.general/pull/10893).
diff --git a/tests/integration/targets/pids/files/obtainpid.sh b/docs/docsite/config.yml
similarity index 80%
rename from tests/integration/targets/pids/files/obtainpid.sh
rename to docs/docsite/config.yml
index 1090f87786..1d6cf8554a 100644
--- a/tests/integration/targets/pids/files/obtainpid.sh
+++ b/docs/docsite/config.yml
@@ -1,7 +1,7 @@
-#!/usr/bin/env bash
+---
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-"$1" 100 &
-echo "$!" > "$2"
+changelog:
+ write_changelog: true
diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml
index 2171031ac1..4594ab4c2d 100644
--- a/docs/docsite/extra-docs.yml
+++ b/docs/docsite/extra-docs.yml
@@ -8,3 +8,17 @@ sections:
toctree:
- filter_guide
- test_guide
+ - title: Technology Guides
+ toctree:
+ - guide_alicloud
+ - guide_iocage
+ - guide_online
+ - guide_packet
+ - guide_scaleway
+ - title: Developer Guides
+ toctree:
+ - guide_deps
+ - guide_vardict
+ - guide_cmdrunner
+ - guide_modulehelper
+ - guide_uthelper
diff --git a/docs/docsite/helper/lists_mergeby/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/default-recursive-true.yml
deleted file mode 100644
index 133c8f2aec..0000000000
--- a/docs/docsite/helper/lists_mergeby/default-recursive-true.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-list1:
- - name: myname01
- param01:
- x: default_value
- y: default_value
- list:
- - default_value
- - name: myname02
- param01: [1, 1, 2, 3]
-
-list2:
- - name: myname01
- param01:
- y: patch_value
- z: patch_value
- list:
- - patch_value
- - name: myname02
- param01: [3, 4, 4, {key: value}]
diff --git a/docs/docsite/helper/lists_mergeby/example-001.yml b/docs/docsite/helper/lists_mergeby/example-001.yml
deleted file mode 100644
index 0cf6a9b8a7..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-001.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- name: 1. Merge two lists by common attribute 'name'
- include_vars:
- dir: example-001_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-001.out
diff --git a/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml b/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml
deleted file mode 120000
index 7ea8984a8d..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-common.yml
\ No newline at end of file
diff --git a/docs/docsite/helper/lists_mergeby/example-002.yml b/docs/docsite/helper/lists_mergeby/example-002.yml
deleted file mode 100644
index 5e6e0315df..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-002.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- name: 2. Merge two lists by common attribute 'name'
- include_vars:
- dir: example-002_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-002.out
diff --git a/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml b/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml
deleted file mode 120000
index 7ea8984a8d..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-common.yml
\ No newline at end of file
diff --git a/docs/docsite/helper/lists_mergeby/example-003.yml b/docs/docsite/helper/lists_mergeby/example-003.yml
deleted file mode 100644
index 2f93ab8a27..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-003.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- name: 3. Merge recursive by 'name', replace lists (default)
- include_vars:
- dir: example-003_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-003.out
diff --git a/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8ad..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/docs/docsite/helper/lists_mergeby/example-004.yml b/docs/docsite/helper/lists_mergeby/example-004.yml
deleted file mode 100644
index 3ef067faf3..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-004.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- name: 4. Merge recursive by 'name', keep lists
- include_vars:
- dir: example-004_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-004.out
diff --git a/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8ad..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml
deleted file mode 100644
index a054ea1e73..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name',
- recursive=true,
- list_merge='keep') }}"
diff --git a/docs/docsite/helper/lists_mergeby/example-005.yml b/docs/docsite/helper/lists_mergeby/example-005.yml
deleted file mode 100644
index 57e7a779d9..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-005.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- name: 5. Merge recursive by 'name', append lists
- include_vars:
- dir: example-005_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-005.out
diff --git a/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8ad..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml
deleted file mode 100644
index 3480bf6581..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name',
- recursive=true,
- list_merge='append') }}"
diff --git a/docs/docsite/helper/lists_mergeby/example-006.yml b/docs/docsite/helper/lists_mergeby/example-006.yml
deleted file mode 100644
index 41fc88e496..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-006.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- name: 6. Merge recursive by 'name', prepend lists
- include_vars:
- dir: example-006_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-006.out
diff --git a/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8ad..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml
deleted file mode 100644
index 97513b5593..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name',
- recursive=true,
- list_merge='prepend') }}"
diff --git a/docs/docsite/helper/lists_mergeby/example-007.yml b/docs/docsite/helper/lists_mergeby/example-007.yml
deleted file mode 100644
index 3de7158447..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-007.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- name: 7. Merge recursive by 'name', append lists 'remove present'
- include_vars:
- dir: example-007_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-007.out
diff --git a/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8ad..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml
deleted file mode 100644
index cb51653b49..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name',
- recursive=true,
- list_merge='append_rp') }}"
diff --git a/docs/docsite/helper/lists_mergeby/example-008.yml b/docs/docsite/helper/lists_mergeby/example-008.yml
deleted file mode 100644
index e33828bf9a..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-008.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- name: 8. Merge recursive by 'name', prepend lists 'remove present'
- include_vars:
- dir: example-008_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-008.out
diff --git a/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8ad..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml
deleted file mode 100644
index af7001fc4a..0000000000
--- a/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name',
- recursive=true,
- list_merge='prepend_rp') }}"
diff --git a/docs/docsite/helper/lists_mergeby/examples.yml b/docs/docsite/helper/lists_mergeby/examples.yml
deleted file mode 100644
index 83b985084e..0000000000
--- a/docs/docsite/helper/lists_mergeby/examples.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-examples:
- - label: 'In the example below the lists are merged by the attribute ``name``:'
- file: example-001_vars/list3.yml
- lang: 'yaml+jinja'
- - label: 'This produces:'
- file: example-001.out
- lang: 'yaml'
- - label: 'It is possible to use a list of lists as an input of the filter:'
- file: example-002_vars/list3.yml
- lang: 'yaml+jinja'
- - label: 'This produces the same result as in the previous example:'
- file: example-002.out
- lang: 'yaml'
- - label: 'Example ``list_merge=replace`` (default):'
- file: example-003_vars/list3.yml
- lang: 'yaml+jinja'
- - label: 'This produces:'
- file: example-003.out
- lang: 'yaml'
- - label: 'Example ``list_merge=keep``:'
- file: example-004_vars/list3.yml
- lang: 'yaml+jinja'
- - label: 'This produces:'
- file: example-004.out
- lang: 'yaml'
- - label: 'Example ``list_merge=append``:'
- file: example-005_vars/list3.yml
- lang: 'yaml+jinja'
- - label: 'This produces:'
- file: example-005.out
- lang: 'yaml'
- - label: 'Example ``list_merge=prepend``:'
- file: example-006_vars/list3.yml
- lang: 'yaml+jinja'
- - label: 'This produces:'
- file: example-006.out
- lang: 'yaml'
- - label: 'Example ``list_merge=append_rp``:'
- file: example-007_vars/list3.yml
- lang: 'yaml+jinja'
- - label: 'This produces:'
- file: example-007.out
- lang: 'yaml'
- - label: 'Example ``list_merge=prepend_rp``:'
- file: example-008_vars/list3.yml
- lang: 'yaml+jinja'
- - label: 'This produces:'
- file: example-008.out
- lang: 'yaml'
diff --git a/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 b/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2
deleted file mode 100644
index 71d0d5da6c..0000000000
--- a/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-..
- Copyright (c) Ansible Project
- GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
- SPDX-License-Identifier: GPL-3.0-or-later
-
-Merging lists of dictionaries
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the ``lists_mergeby`` filter.
-
-.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ref:`the documentation for the community.general.yaml callback plugin `.
-
-Let us use the lists below in the following examples:
-
-.. code-block:: yaml
-
- {{ lookup('file', 'default-common.yml')|indent(2) }}
-
-{% for i in examples[0:2] %}
-{{ i.label }}
-
-.. code-block:: {{ i.lang }}
-
- {{ lookup('file', i.file)|indent(2) }}
-
-{% endfor %}
-
-.. versionadded:: 2.0.0
-
-{% for i in examples[2:4] %}
-{{ i.label }}
-
-.. code-block:: {{ i.lang }}
-
- {{ lookup('file', i.file)|indent(2) }}
-
-{% endfor %}
-
-The filter also accepts two optional parameters: ``recursive`` and ``list_merge``. These parameters are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9. This is available since community.general 4.4.0.
-
-**recursive**
- Is a boolean, default to ``False``. Should the ``community.general.lists_mergeby`` recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``.
-
-**list_merge**
- Is a string, its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. It modifies the behaviour of ``community.general.lists_mergeby`` when the hashes to merge contain arrays/lists.
-
-The examples below set ``recursive=true`` and display the differences among all six options of ``list_merge``. Functionality of the parameters is exactly the same as in the filter ``combine``. See :ref:`Combining hashes/dictionaries ` to learn details about these options.
-
-Let us use the lists below in the following examples
-
-.. code-block:: yaml
-
- {{ lookup('file', 'default-recursive-true.yml')|indent(2) }}
-
-{% for i in examples[4:16] %}
-{{ i.label }}
-
-.. code-block:: {{ i.lang }}
-
- {{ lookup('file', i.file)|indent(2) }}
-
-{% endfor %}
diff --git a/docs/docsite/helper/lists_mergeby/playbook.yml b/docs/docsite/helper/lists_mergeby/playbook.yml
deleted file mode 100644
index 793d233485..0000000000
--- a/docs/docsite/helper/lists_mergeby/playbook.yml
+++ /dev/null
@@ -1,62 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-# 1) Run all examples and create example-XXX.out
-# shell> ansible-playbook playbook.yml -e examples=true
-#
-# 2) Optionally, for testing, create examples_all.rst
-# shell> ansible-playbook playbook.yml -e examples_all=true
-#
-# 3) Create docs REST files
-# shell> ansible-playbook playbook.yml -e merging_lists_of_dictionaries=true
-#
-# Notes:
-# * Use YAML callback, e.g. set ANSIBLE_STDOUT_CALLBACK=community.general.yaml
-# * Use sphinx-view to render and review the REST files
-# shell> sphinx-view /examples_all.rst
-# * Proofread and copy completed docs *.rst files into the directory rst.
-# * Then delete the *.rst and *.out files from this directory. Do not
-# add *.rst and *.out in this directory to the version control.
-#
-# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-# community.general/docs/docsite/helper/lists_mergeby/playbook.yml
-
-- hosts: localhost
- gather_facts: false
- tasks:
-
- - block:
- - import_tasks: example-001.yml
- tags: t001
- - import_tasks: example-002.yml
- tags: t002
- - import_tasks: example-003.yml
- tags: t003
- - import_tasks: example-004.yml
- tags: t004
- - import_tasks: example-005.yml
- tags: t005
- - import_tasks: example-006.yml
- tags: t006
- - import_tasks: example-007.yml
- tags: t007
- - import_tasks: example-008.yml
- tags: t008
- when: examples|d(false)|bool
-
- - block:
- - include_vars: examples.yml
- - template:
- src: examples_all.rst.j2
- dest: examples_all.rst
- when: examples_all|d(false)|bool
-
- - block:
- - include_vars: examples.yml
- - template:
- src: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2
- dest: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst
- when: merging_lists_of_dictionaries|d(false)|bool
diff --git a/docs/docsite/links.yml b/docs/docsite/links.yml
index bd954c4096..fe41d1d2fd 100644
--- a/docs/docsite/links.yml
+++ b/docs/docsite/links.yml
@@ -9,6 +9,8 @@ edit_on_github:
path_prefix: ''
extra_links:
+ - description: Ask for help
+ url: https://forum.ansible.com/c/help/6/none
- description: Submit a bug report
url: https://github.com/ansible-collections/community.general/issues/new?assignees=&labels=&template=bug_report.yml
- description: Request a feature
@@ -22,6 +24,10 @@ communication:
- topic: General usage and support questions
network: Libera
channel: '#ansible'
- mailing_lists:
- - topic: Ansible Project List
- url: https://groups.google.com/g/ansible-project
+ forums:
+ - topic: "Ansible Forum: General usage and support questions"
+ # The following URL directly points to the "Get Help" section
+ url: https://forum.ansible.com/c/help/6/none
+ - topic: "Ansible Forum: Discussions about the collection itself, not for specific modules or plugins"
+ # The following URL directly points to the "community-general" tag
+ url: https://forum.ansible.com/tag/community-general
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst
new file mode 100644
index 0000000000..3549d29ba7
--- /dev/null
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst
@@ -0,0 +1,151 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+keep_keys
+"""""""""
+
+Use the filter :ansplugin:`community.general.keep_keys#filter` if you have a list of dictionaries and want to keep certain keys only.
+
+.. note:: The output of the examples in this section uses the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `.
+
+
+Let us use the below list in the following examples:
+
+.. code-block:: yaml
+
+ input:
+ - k0_x0: A0
+ k1_x1: B0
+ k2_x2: [C0]
+ k3_x3: foo
+ - k0_x0: A1
+ k1_x1: B1
+ k2_x2: [C1]
+ k3_x3: bar
+
+
+* By default, match keys that equal any of the items in the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1
+
+ target: ['k0_x0', 'k1_x1']
+ result: "{{ input | community.general.keep_keys(target=target) }}"
+
+
+gives
+
+.. code-block:: yaml
+ :emphasize-lines: 1-
+
+ result:
+ - {k0_x0: A0, k1_x1: B0}
+ - {k0_x0: A1, k1_x1: B1}
+
+
+.. versionadded:: 9.1.0
+
+* The results of the below examples 1-5 are all the same:
+
+.. code-block:: yaml
+ :emphasize-lines: 1-
+
+ result:
+ - {k0_x0: A0, k1_x1: B0}
+ - {k0_x0: A1, k1_x1: B1}
+
+
+1. Match keys that equal any of the items in the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: equal
+ target: ['k0_x0', 'k1_x1']
+ result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+2. Match keys that start with any of the items in the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: starts_with
+ target: ['k0', 'k1']
+ result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+3. Match keys that end with any of the items in the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: ends_with
+ target: ['x0', 'x1']
+ result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+4. Match keys by the regex.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: regex
+ target: ['^.*[01]_x.*$']
+ result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+5. Match keys by the regex.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: regex
+ target: ^.*[01]_x.*$
+ result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+
+* The results of the below examples 6-9 are all the same:
+
+.. code-block:: yaml
+ :emphasize-lines: 1-
+
+ result:
+ - {k0_x0: A0}
+ - {k0_x0: A1}
+
+
+6. Match keys that equal the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: equal
+ target: k0_x0
+ result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+7. Match keys that start with the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: starts_with
+ target: k0
+ result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+8. Match keys that end with the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: ends_with
+ target: x0
+ result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
+9. Match keys by the regex.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: regex
+ target: ^.*0_x.*$
+ result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}"
+
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst
new file mode 100644
index 0000000000..4ac87ab79c
--- /dev/null
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst
@@ -0,0 +1,159 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+remove_keys
+"""""""""""
+
+Use the filter :ansplugin:`community.general.remove_keys#filter` if you have a list of dictionaries and want to remove certain keys.
+
+.. note:: The output of the examples in this section uses the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `.
+
+
+Let us use the below list in the following examples:
+
+.. code-block:: yaml
+
+ input:
+ - k0_x0: A0
+ k1_x1: B0
+ k2_x2: [C0]
+ k3_x3: foo
+ - k0_x0: A1
+ k1_x1: B1
+ k2_x2: [C1]
+ k3_x3: bar
+
+
+* By default, match keys that equal any of the items in the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1
+
+ target: ['k0_x0', 'k1_x1']
+ result: "{{ input | community.general.remove_keys(target=target) }}"
+
+
+gives
+
+.. code-block:: yaml
+ :emphasize-lines: 1-
+
+ result:
+ - k2_x2: [C0]
+ k3_x3: foo
+ - k2_x2: [C1]
+ k3_x3: bar
+
+
+.. versionadded:: 9.1.0
+
+* The results of the below examples 1-5 are all the same:
+
+.. code-block:: yaml
+ :emphasize-lines: 1-
+
+ result:
+ - k2_x2: [C0]
+ k3_x3: foo
+ - k2_x2: [C1]
+ k3_x3: bar
+
+
+1. Match keys that equal any of the items in the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: equal
+ target: ['k0_x0', 'k1_x1']
+ result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}"
+
+2. Match keys that start with any of the items in the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: starts_with
+ target: ['k0', 'k1']
+ result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}"
+
+3. Match keys that end with any of the items in the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: ends_with
+ target: ['x0', 'x1']
+ result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}"
+
+4. Match keys by the regex.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: regex
+ target: ['^.*[01]_x.*$']
+ result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}"
+
+5. Match keys by the regex.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: regex
+ target: ^.*[01]_x.*$
+ result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}"
+
+
+* The results of the below examples 6-9 are all the same:
+
+.. code-block:: yaml
+ :emphasize-lines: 1-
+
+ result:
+ - k1_x1: B0
+ k2_x2: [C0]
+ k3_x3: foo
+ - k1_x1: B1
+ k2_x2: [C1]
+ k3_x3: bar
+
+
+6. Match keys that equal the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: equal
+ target: k0_x0
+ result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}"
+
+7. Match keys that start with the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: starts_with
+ target: k0
+ result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}"
+
+8. Match keys that end with the target.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: ends_with
+ target: x0
+ result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}"
+
+9. Match keys by the regex.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1,2
+
+ mp: regex
+ target: ^.*0_x.*$
+ result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}"
+
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst
new file mode 100644
index 0000000000..d0eb202bfe
--- /dev/null
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst
@@ -0,0 +1,175 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+replace_keys
+""""""""""""
+
+Use the filter :ansplugin:`community.general.replace_keys#filter` if you have a list of dictionaries and want to replace certain keys.
+
+.. note:: The output of the examples in this section uses the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `.
+
+
+Let us use the below list in the following examples:
+
+.. code-block:: yaml
+
+ input:
+ - k0_x0: A0
+ k1_x1: B0
+ k2_x2: [C0]
+ k3_x3: foo
+ - k0_x0: A1
+ k1_x1: B1
+ k2_x2: [C1]
+ k3_x3: bar
+
+
+* By default, match keys that equal any of the attributes before.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1-3
+
+ target:
+ - {after: a0, before: k0_x0}
+ - {after: a1, before: k1_x1}
+
+ result: "{{ input | community.general.replace_keys(target=target) }}"
+
+
+gives
+
+.. code-block:: yaml
+ :emphasize-lines: 1-
+
+ result:
+ - a0: A0
+ a1: B0
+ k2_x2: [C0]
+ k3_x3: foo
+ - a0: A1
+ a1: B1
+ k2_x2: [C1]
+ k3_x3: bar
+
+
+.. versionadded:: 9.1.0
+
+* The results of the below examples 1-3 are all the same:
+
+.. code-block:: yaml
+ :emphasize-lines: 1-
+
+ result:
+ - a0: A0
+ a1: B0
+ k2_x2: [C0]
+ k3_x3: foo
+ - a0: A1
+ a1: B1
+ k2_x2: [C1]
+ k3_x3: bar
+
+
+1. Replace keys that start with any of the attributes before.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1-4
+
+ mp: starts_with
+ target:
+ - {after: a0, before: k0}
+ - {after: a1, before: k1}
+
+ result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}"
+
+2. Replace keys that end with any of the attributes before.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1-4
+
+ mp: ends_with
+ target:
+ - {after: a0, before: x0}
+ - {after: a1, before: x1}
+
+ result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}"
+
+3. Replace keys that match any regex of the attributes before.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1-4
+
+ mp: regex
+ target:
+ - {after: a0, before: ^.*0_x.*$}
+ - {after: a1, before: ^.*1_x.*$}
+
+ result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}"
+
+
+* The results of the below examples 4-5 are the same:
+
+.. code-block:: yaml
+ :emphasize-lines: 1-
+
+ result:
+ - {X: foo}
+ - {X: bar}
+
+
+4. If more keys match the same attribute before, the last one will be used.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1-3
+
+ mp: regex
+ target:
+ - {after: X, before: ^.*_x.*$}
+
+ result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}"
+
+5. If there are items with equal attribute before, the first one will be used.
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1-3
+
+ mp: regex
+ target:
+ - {after: X, before: ^.*_x.*$}
+ - {after: Y, before: ^.*_x.*$}
+
+ result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}"
+
+
+6. If there are more matches for a key, the first one will be used.
+
+.. code-block:: yaml
+ :emphasize-lines: 1-
+
+ input:
+ - {aaa1: A, bbb1: B, ccc1: C}
+ - {aaa2: D, bbb2: E, ccc2: F}
+
+
+.. code-block:: yaml+jinja
+ :emphasize-lines: 1-4
+
+ mp: starts_with
+ target:
+ - {after: X, before: a}
+ - {after: Y, before: aa}
+
+ result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}"
+
+gives
+
+.. code-block:: yaml
+ :emphasize-lines: 1-
+
+ result:
+ - {X: A, bbb1: B, ccc1: C}
+ - {X: D, bbb2: E, ccc2: F}
+
+
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst
new file mode 100644
index 0000000000..64a82536d8
--- /dev/null
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst
@@ -0,0 +1,18 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.filter_guide.filter_guide_abstract_informations.lists_of_dicts:
+
+Lists of dictionaries
+^^^^^^^^^^^^^^^^^^^^^
+
+Filters to manage keys in a list of dictionaries:
+
+.. toctree::
+ :maxdepth: 1
+
+ filter_guide-abstract_informations-lists_of_dictionaries-keep_keys
+ filter_guide-abstract_informations-lists_of_dictionaries-remove_keys
+ filter_guide-abstract_informations-lists_of_dictionaries-replace_keys
diff --git a/docs/docsite/rst/filter_guide.rst b/docs/docsite/rst/filter_guide.rst
index 1c6468ddec..da8a90af3c 100644
--- a/docs/docsite/rst/filter_guide.rst
+++ b/docs/docsite/rst/filter_guide.rst
@@ -8,7 +8,7 @@
community.general Filter Guide
==============================
-The :ref:`community.general collection ` offers several useful filter plugins.
+The :anscollection:`community.general collection ` offers several useful filter plugins.
.. toctree::
:maxdepth: 2
diff --git a/docs/docsite/rst/filter_guide_abstract_informations.rst b/docs/docsite/rst/filter_guide_abstract_informations.rst
index 8f997f1637..818c09f02c 100644
--- a/docs/docsite/rst/filter_guide_abstract_informations.rst
+++ b/docs/docsite/rst/filter_guide_abstract_informations.rst
@@ -11,5 +11,7 @@ Abstract transformations
filter_guide_abstract_informations_dictionaries
filter_guide_abstract_informations_grouping
+ filter_guide-abstract_informations-lists_of_dictionaries
filter_guide_abstract_informations_merging_lists_of_dictionaries
+ filter_guide_abstract_informations_lists_helper
filter_guide_abstract_informations_counting_elements_in_sequence
diff --git a/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst b/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst
index dcadd5a793..98e8eb1c4d 100644
--- a/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst
+++ b/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst
@@ -6,7 +6,7 @@
Counting elements in a sequence
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The ``community.general.counter`` filter plugin allows you to count (hashable) elements in a sequence. Elements are returned as dictionary keys and their counts are stored as dictionary values.
+The :ansplugin:`community.general.counter filter plugin ` allows you to count (hashable) elements in a sequence. Elements are returned as dictionary keys and their counts are stored as dictionary values.
.. code-block:: yaml+jinja
diff --git a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst
index 840bd1542c..e5b5bb7e36 100644
--- a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst
+++ b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst
@@ -6,7 +6,7 @@
Dictionaries
^^^^^^^^^^^^
-You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``:
+You can use the :ansplugin:`community.general.dict_kv filter ` to create a single-entry dictionary with ``value | community.general.dict_kv(key)``:
.. code-block:: yaml+jinja
@@ -26,8 +26,8 @@ You can use the ``dict_kv`` filter to create a single-entry dictionary with ``va
type: host
database: all
myservers:
- - server1
- - server2
+ - server1
+ - server2
This produces:
@@ -58,7 +58,7 @@ This produces:
.. versionadded:: 2.0.0
-If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the ``community.general.dict`` filter can be used:
+If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the :ansplugin:`community.general.dict filter ` can be used:
.. code-block:: yaml+jinja
diff --git a/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst b/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst
index 2cea7f9bab..cb15989659 100644
--- a/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst
+++ b/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst
@@ -6,7 +6,7 @@
Grouping
^^^^^^^^
-If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be a most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary.
+If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows grouping the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be at most one entry in that list, you can use the :ansplugin:`community.general.groupby_as_dict filter ` to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary.
One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information:
diff --git a/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst b/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst
new file mode 100644
index 0000000000..505320c79c
--- /dev/null
+++ b/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst
@@ -0,0 +1,81 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Union, intersection and difference of lists
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Starting with Ansible Core 2.16, the builtin filters :ansplugin:`ansible.builtin.union#filter`, :ansplugin:`ansible.builtin.intersect#filter`, :ansplugin:`ansible.builtin.difference#filter` and :ansplugin:`ansible.builtin.symmetric_difference#filter` began to behave differently and no longer preserve the item order. Items in the resulting lists are returned in arbitrary order and the order can vary between subsequent runs.
+
+The Ansible community.general collection provides the following additional list filters:
+
+- :ansplugin:`community.general.lists_union#filter`
+- :ansplugin:`community.general.lists_intersect#filter`
+- :ansplugin:`community.general.lists_difference#filter`
+- :ansplugin:`community.general.lists_symmetric_difference#filter`
+
+These filters preserve the item order, eliminate duplicates and are an extended version of the builtin ones, because they can operate on more than two lists.
+
+.. note:: Stick to the builtin filters when item order is not important or when you do not need the n-ary operating mode. The builtin filters are faster because they rely mostly on sets as their underlying data structure.
+
+Let us use the lists below in the following examples:
+
+.. code-block:: yaml
+
+ A: [9, 5, 7, 1, 9, 4, 10, 5, 9, 7]
+ B: [4, 1, 2, 8, 3, 1, 7]
+ C: [10, 2, 1, 9, 1]
+
+The union of ``A`` and ``B`` can be written as:
+
+.. code-block:: yaml+jinja
+
+ result: "{{ A | community.general.lists_union(B) }}"
+
+This statement produces:
+
+.. code-block:: yaml
+
+ result: [9, 5, 7, 1, 4, 10, 2, 8, 3]
+
+If you want to calculate the intersection of ``A``, ``B`` and ``C``, you can use the following statement:
+
+.. code-block:: yaml+jinja
+
+ result: "{{ A | community.general.lists_intersect(B, C) }}"
+
+Alternatively, you can use a list of lists as an input of the filter
+
+.. code-block:: yaml+jinja
+
+ result: "{{ [A, B] | community.general.lists_intersect(C) }}"
+
+or
+
+.. code-block:: yaml+jinja
+
+ result: "{{ [A, B, C] | community.general.lists_intersect(flatten=true) }}"
+
+All three statements are equivalent and give:
+
+.. code-block:: yaml
+
+ result: [1]
+
+.. note:: Be aware that in most cases, filter calls without any argument require ``flatten=true``, otherwise the input is returned as the result. The reason for this is that the input is considered as a variable argument and is wrapped by an additional outer list. ``flatten=true`` ensures that this list is removed before the input is processed by the filter logic.
+
+The filters :ansplugin:`community.general.lists_difference#filter` or :ansplugin:`community.general.lists_symmetric_difference#filter` can be used in the same way as the filters in the examples above. They calculate the difference or the symmetric difference between two or more lists and preserve the item order.
+
+For example, the symmetric difference of ``A``, ``B`` and ``C`` may be written as:
+
+.. code-block:: yaml+jinja
+
+ result: "{{ A | community.general.lists_symmetric_difference(B, C) }}"
+
+This gives:
+
+.. code-block:: yaml
+
+ result: [5, 8, 3, 1]
+
diff --git a/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst
index 9b56e98d7e..cafe04e5c4 100644
--- a/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst
+++ b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst
@@ -6,33 +6,30 @@
Merging lists of dictionaries
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the ``lists_mergeby`` filter.
+If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the :ansplugin:`community.general.lists_mergeby ` filter.
-.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ref:`the documentation for the community.general.yaml callback plugin `.
+.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See the documentation for the :ansplugin:`community.general.yaml callback plugin `.
Let us use the lists below in the following examples:
.. code-block:: yaml
list1:
- - name: foo
- extra: true
- - name: bar
- extra: false
- - name: meh
- extra: true
+ - {name: foo, extra: true}
+ - {name: bar, extra: false}
+ - {name: meh, extra: true}
list2:
- - name: foo
- path: /foo
- - name: baz
- path: /baz
+ - {name: foo, path: /foo}
+ - {name: baz, path: /baz}
+Two lists
+"""""""""
In the example below the lists are merged by the attribute ``name``:
.. code-block:: yaml+jinja
- list3: "{{ list1|
+ list3: "{{ list1 |
community.general.lists_mergeby(list2, 'name') }}"
This produces:
@@ -40,24 +37,21 @@ This produces:
.. code-block:: yaml
list3:
- - extra: false
- name: bar
- - name: baz
- path: /baz
- - extra: true
- name: foo
- path: /foo
- - extra: true
- name: meh
+ - {name: bar, extra: false}
+ - {name: baz, path: /baz}
+ - {name: foo, extra: true, path: /foo}
+ - {name: meh, extra: true}
.. versionadded:: 2.0.0
+List of two lists
+"""""""""""""""""
It is possible to use a list of lists as an input of the filter:
.. code-block:: yaml+jinja
- list3: "{{ [list1, list2]|
+ list3: "{{ [list1, list2] |
community.general.lists_mergeby('name') }}"
This produces the same result as in the previous example:
@@ -65,26 +59,40 @@ This produces the same result as in the previous example:
.. code-block:: yaml
list3:
- - extra: false
- name: bar
- - name: baz
- path: /baz
- - extra: true
- name: foo
- path: /foo
- - extra: true
- name: meh
+ - {name: bar, extra: false}
+ - {name: baz, path: /baz}
+ - {name: foo, extra: true, path: /foo}
+ - {name: meh, extra: true}
+
+Single list
+"""""""""""
+It is possible to merge a single list:
+
+.. code-block:: yaml+jinja
+
+ list3: "{{ [list1 + list2, []] |
+ community.general.lists_mergeby('name') }}"
+
+This produces the same result as in the previous example:
+
+.. code-block:: yaml
+
+ list3:
+ - {name: bar, extra: false}
+ - {name: baz, path: /baz}
+ - {name: foo, extra: true, path: /foo}
+ - {name: meh, extra: true}
-The filter also accepts two optional parameters: ``recursive`` and ``list_merge``. These parameters are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9. This is available since community.general 4.4.0.
+The filter also accepts two optional parameters: :ansopt:`community.general.lists_mergeby#filter:recursive` and :ansopt:`community.general.lists_mergeby#filter:list_merge`. This is available since community.general 4.4.0.
**recursive**
- Is a boolean, default to ``False``. Should the ``community.general.lists_mergeby`` recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``.
+ Is a boolean, defaults to ``false``. Should the :ansplugin:`community.general.lists_mergeby#filter` filter recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``.
**list_merge**
- Is a string, its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. It modifies the behaviour of ``community.general.lists_mergeby`` when the hashes to merge contain arrays/lists.
+ Is a string; its possible values are :ansval:`replace` (default), :ansval:`keep`, :ansval:`append`, :ansval:`prepend`, :ansval:`append_rp` or :ansval:`prepend_rp`. It modifies the behaviour of :ansplugin:`community.general.lists_mergeby#filter` when the hashes to merge contain arrays/lists.
-The examples below set ``recursive=true`` and display the differences among all six options of ``list_merge``. Functionality of the parameters is exactly the same as in the filter ``combine``. See :ref:`Combining hashes/dictionaries ` to learn details about these options.
+The examples below set :ansopt:`community.general.lists_mergeby#filter:recursive=true` and display the differences among all six options of :ansopt:`community.general.lists_mergeby#filter:list_merge`. Functionality of the parameters is exactly the same as in the filter :ansplugin:`ansible.builtin.combine#filter`. See :ref:`Combining hashes/dictionaries ` to learn details about these options.
Let us use the lists below in the following examples
@@ -95,8 +103,7 @@ Let us use the lists below in the following examples
param01:
x: default_value
y: default_value
- list:
- - default_value
+ list: [default_value]
- name: myname02
param01: [1, 1, 2, 3]
@@ -105,16 +112,17 @@ Let us use the lists below in the following examples
param01:
y: patch_value
z: patch_value
- list:
- - patch_value
+ list: [patch_value]
- name: myname02
- param01: [3, 4, 4, {key: value}]
+ param01: [3, 4, 4]
-Example ``list_merge=replace`` (default):
+list_merge=replace (default)
+""""""""""""""""""""""""""""
+Example :ansopt:`community.general.lists_mergeby#filter:list_merge=replace` (default):
.. code-block:: yaml+jinja
- list3: "{{ [list1, list2]|
+ list3: "{{ [list1, list2] |
community.general.lists_mergeby('name',
recursive=true) }}"
@@ -123,25 +131,22 @@ This produces:
.. code-block:: yaml
list3:
- - name: myname01
- param01:
- list:
- - patch_value
- x: default_value
- y: patch_value
- z: patch_value
- - name: myname02
- param01:
- - 3
- - 4
- - 4
- - key: value
+ - name: myname01
+ param01:
+ x: default_value
+ y: patch_value
+ list: [patch_value]
+ z: patch_value
+ - name: myname02
+ param01: [3, 4, 4]
-Example ``list_merge=keep``:
+list_merge=keep
+"""""""""""""""
+Example :ansopt:`community.general.lists_mergeby#filter:list_merge=keep`:
.. code-block:: yaml+jinja
- list3: "{{ [list1, list2]|
+ list3: "{{ [list1, list2] |
community.general.lists_mergeby('name',
recursive=true,
list_merge='keep') }}"
@@ -151,25 +156,22 @@ This produces:
.. code-block:: yaml
list3:
- - name: myname01
- param01:
- list:
- - default_value
- x: default_value
- y: patch_value
- z: patch_value
- - name: myname02
- param01:
- - 1
- - 1
- - 2
- - 3
+ - name: myname01
+ param01:
+ x: default_value
+ y: patch_value
+ list: [default_value]
+ z: patch_value
+ - name: myname02
+ param01: [1, 1, 2, 3]
-Example ``list_merge=append``:
+list_merge=append
+"""""""""""""""""
+Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append`:
.. code-block:: yaml+jinja
- list3: "{{ [list1, list2]|
+ list3: "{{ [list1, list2] |
community.general.lists_mergeby('name',
recursive=true,
list_merge='append') }}"
@@ -179,30 +181,22 @@ This produces:
.. code-block:: yaml
list3:
- - name: myname01
- param01:
- list:
- - default_value
- - patch_value
- x: default_value
- y: patch_value
- z: patch_value
- - name: myname02
- param01:
- - 1
- - 1
- - 2
- - 3
- - 3
- - 4
- - 4
- - key: value
+ - name: myname01
+ param01:
+ x: default_value
+ y: patch_value
+ list: [default_value, patch_value]
+ z: patch_value
+ - name: myname02
+ param01: [1, 1, 2, 3, 3, 4, 4]
-Example ``list_merge=prepend``:
+list_merge=prepend
+""""""""""""""""""
+Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend`:
.. code-block:: yaml+jinja
- list3: "{{ [list1, list2]|
+ list3: "{{ [list1, list2] |
community.general.lists_mergeby('name',
recursive=true,
list_merge='prepend') }}"
@@ -212,30 +206,22 @@ This produces:
.. code-block:: yaml
list3:
- - name: myname01
- param01:
- list:
- - patch_value
- - default_value
- x: default_value
- y: patch_value
- z: patch_value
- - name: myname02
- param01:
- - 3
- - 4
- - 4
- - key: value
- - 1
- - 1
- - 2
- - 3
+ - name: myname01
+ param01:
+ x: default_value
+ y: patch_value
+ list: [patch_value, default_value]
+ z: patch_value
+ - name: myname02
+ param01: [3, 4, 4, 1, 1, 2, 3]
-Example ``list_merge=append_rp``:
+list_merge=append_rp
+""""""""""""""""""""
+Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append_rp`:
.. code-block:: yaml+jinja
- list3: "{{ [list1, list2]|
+ list3: "{{ [list1, list2] |
community.general.lists_mergeby('name',
recursive=true,
list_merge='append_rp') }}"
@@ -245,29 +231,22 @@ This produces:
.. code-block:: yaml
list3:
- - name: myname01
- param01:
- list:
- - default_value
- - patch_value
- x: default_value
- y: patch_value
- z: patch_value
- - name: myname02
- param01:
- - 1
- - 1
- - 2
- - 3
- - 4
- - 4
- - key: value
+ - name: myname01
+ param01:
+ x: default_value
+ y: patch_value
+ list: [default_value, patch_value]
+ z: patch_value
+ - name: myname02
+ param01: [1, 1, 2, 3, 4, 4]
-Example ``list_merge=prepend_rp``:
+list_merge=prepend_rp
+"""""""""""""""""""""
+Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend_rp`:
.. code-block:: yaml+jinja
- list3: "{{ [list1, list2]|
+ list3: "{{ [list1, list2] |
community.general.lists_mergeby('name',
recursive=true,
list_merge='prepend_rp') }}"
@@ -277,21 +256,12 @@ This produces:
.. code-block:: yaml
list3:
- - name: myname01
- param01:
- list:
- - patch_value
- - default_value
- x: default_value
- y: patch_value
- z: patch_value
- - name: myname02
- param01:
- - 3
- - 4
- - 4
- - key: value
- - 1
- - 1
- - 2
+ - name: myname01
+ param01:
+ x: default_value
+ y: patch_value
+ list: [patch_value, default_value]
+ z: patch_value
+ - name: myname02
+ param01: [3, 4, 4, 1, 1, 2]
diff --git a/docs/docsite/rst/filter_guide_conversions.rst b/docs/docsite/rst/filter_guide_conversions.rst
index 78970c17b9..ca0401762c 100644
--- a/docs/docsite/rst/filter_guide_conversions.rst
+++ b/docs/docsite/rst/filter_guide_conversions.rst
@@ -9,7 +9,7 @@ Conversions
Parsing CSV files
^^^^^^^^^^^^^^^^^
-Ansible offers the :ref:`community.general.read_csv module ` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the ``from_csv`` filter exists.
+Ansible offers the :ansplugin:`community.general.read_csv module ` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the :ansplugin:`community.general.from_csv filter ` exists.
.. code-block:: yaml+jinja
@@ -42,7 +42,7 @@ This produces:
]
}
-The ``from_csv`` filter has several keyword arguments to control its behavior:
+The :ansplugin:`community.general.from_csv filter ` has several keyword arguments to control its behavior:
:dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored.
:fieldnames: A set of column names to use. If not provided, the first line of the CSV is assumed to contain the column names.
@@ -55,7 +55,7 @@ The ``from_csv`` filter has several keyword arguments to control its behavior:
Converting to JSON
^^^^^^^^^^^^^^^^^^
-`JC `_ is a CLI tool and Python library which allows to interpret output of various CLI programs as JSON. It is also available as a filter in community.general. This filter needs the `jc Python library `_ installed on the controller.
+`JC `_ is a CLI tool and Python library which allows interpreting the output of various CLI programs as JSON. It is also available as a filter in community.general, called :ansplugin:`community.general.jc#filter`. This filter needs the `jc Python library `_ installed on the controller.
.. code-block:: yaml+jinja
diff --git a/docs/docsite/rst/filter_guide_creating_identifiers.rst b/docs/docsite/rst/filter_guide_creating_identifiers.rst
index af0a8b7bab..6e0c730c60 100644
--- a/docs/docsite/rst/filter_guide_creating_identifiers.rst
+++ b/docs/docsite/rst/filter_guide_creating_identifiers.rst
@@ -11,7 +11,7 @@ The following filters allow to create identifiers.
Hashids
^^^^^^^
-`Hashids `_ allow to convert sequences of integers to short unique string identifiers. This filter needs the `hashids Python library `_ installed on the controller.
+`Hashids `_ allow converting sequences of integers to short unique string identifiers. The :ansplugin:`community.general.hashids_encode#filter` and :ansplugin:`community.general.hashids_decode#filter` filters need the `hashids Python library `_ installed on the controller.
.. code-block:: yaml+jinja
@@ -52,7 +52,7 @@ The hashids filters accept keyword arguments to allow fine-tuning the hashids ge
Random MACs
^^^^^^^^^^^
-You can use the ``random_mac`` filter to complete a partial `MAC address `_ to a random 6-byte MAC address.
+You can use the :ansplugin:`community.general.random_mac filter ` to complete a partial `MAC address `_ to a random 6-byte MAC address.
.. code-block:: yaml+jinja
diff --git a/docs/docsite/rst/filter_guide_paths.rst b/docs/docsite/rst/filter_guide_paths.rst
index dac8931454..41185832f2 100644
--- a/docs/docsite/rst/filter_guide_paths.rst
+++ b/docs/docsite/rst/filter_guide_paths.rst
@@ -6,14 +6,4 @@
Paths
-----
-The ``path_join`` filter has been added in ansible-base 2.10. If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``. This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9.
-
-.. code-block:: yaml+jinja
-
- # ansible-base 2.10 or newer:
- path: {{ ('/etc', path, 'subdir', file) | path_join }}
-
- # Also works with Ansible 2.9:
- path: {{ ('/etc', path, 'subdir', file) | community.general.path_join }}
-
-.. versionadded:: 3.0.0
+The :ansplugin:`ansible.builtin.path_join filter ` has been added in ansible-base 2.10. Community.general 3.0.0 and newer contains an alias ``community.general.path_join`` for this filter that could be used on Ansible 2.9 as well. Since community.general no longer supports Ansible 2.9, this is now a simple redirect to :ansplugin:`ansible.builtin.path_join filter `.
diff --git a/docs/docsite/rst/filter_guide_selecting_json_data.rst b/docs/docsite/rst/filter_guide_selecting_json_data.rst
index d8de07b926..bdf2624f3c 100644
--- a/docs/docsite/rst/filter_guide_selecting_json_data.rst
+++ b/docs/docsite/rst/filter_guide_selecting_json_data.rst
@@ -8,7 +8,7 @@
Selecting JSON data: JSON queries
---------------------------------
-To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure.
+To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the :ansplugin:`community.general.json_query filter `. The :ansplugin:`community.general.json_query#filter` filter lets you query a complex JSON structure and iterate over it using a loop structure.
.. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples `_.
@@ -17,50 +17,50 @@ Consider this data structure:
.. code-block:: yaml+jinja
{
- "domain_definition": {
- "domain": {
- "cluster": [
- {
- "name": "cluster1"
- },
- {
- "name": "cluster2"
- }
- ],
- "server": [
- {
- "name": "server11",
- "cluster": "cluster1",
- "port": "8080"
- },
- {
- "name": "server12",
- "cluster": "cluster1",
- "port": "8090"
- },
- {
- "name": "server21",
- "cluster": "cluster2",
- "port": "9080"
- },
- {
- "name": "server22",
- "cluster": "cluster2",
- "port": "9090"
- }
- ],
- "library": [
- {
- "name": "lib1",
- "target": "cluster1"
- },
- {
- "name": "lib2",
- "target": "cluster2"
- }
- ]
+ "domain_definition": {
+ "domain": {
+ "cluster": [
+ {
+ "name": "cluster1"
+ },
+ {
+ "name": "cluster2"
}
+ ],
+ "server": [
+ {
+ "name": "server11",
+ "cluster": "cluster1",
+ "port": "8080"
+ },
+ {
+ "name": "server12",
+ "cluster": "cluster1",
+ "port": "8090"
+ },
+ {
+ "name": "server21",
+ "cluster": "cluster2",
+ "port": "9080"
+ },
+ {
+ "name": "server22",
+ "cluster": "cluster2",
+ "port": "9090"
+ }
+ ],
+ "library": [
+ {
+ "name": "lib1",
+ "target": "cluster1"
+ },
+ {
+ "name": "lib2",
+ "target": "cluster2"
+ }
+ ]
}
+ }
}
To extract all clusters from this structure, you can use the following query:
@@ -124,7 +124,7 @@ To get a hash map with all ports and names of a cluster:
var: item
loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
vars:
- server_name_cluster1_query: "domain.server[?cluster=='cluster2'].{name: name, port: port}"
+ server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}"
To extract ports from all clusters with name starting with 'server1':
@@ -146,4 +146,4 @@ To extract ports from all clusters with name containing 'server1':
vars:
server_name_query: "domain.server[?contains(name,'server1')].port"
-.. note:: while using ``starts_with`` and ``contains``, you have to use `` to_json | from_json `` filter for correct parsing of data structure.
+.. note:: While using ``starts_with`` and ``contains``, you have to use the ``to_json | from_json`` filter for correct parsing of the data structure.
diff --git a/docs/docsite/rst/filter_guide_working_with_times.rst b/docs/docsite/rst/filter_guide_working_with_times.rst
index dc68f2a2e3..032d44bb57 100644
--- a/docs/docsite/rst/filter_guide_working_with_times.rst
+++ b/docs/docsite/rst/filter_guide_working_with_times.rst
@@ -6,9 +6,9 @@
Working with times
------------------
-The ``to_time_unit`` filter allows to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds.
+The :ansplugin:`community.general.to_time_unit filter ` allows converting times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds.
-There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used:
+There are shorthands to directly convert to various units, like :ansplugin:`community.general.to_hours#filter`, :ansplugin:`community.general.to_minutes#filter`, :ansplugin:`community.general.to_seconds#filter`, and so on. The following table lists all units that can be used:
.. list-table:: Units
:widths: 25 25 25 25
@@ -21,37 +21,37 @@ There are shorthands to directly convert to various units, like ``to_hours``, ``
* - Millisecond
- 1/1000 second
- ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds``
- - ``to_milliseconds``
+ - :ansplugin:`community.general.to_milliseconds#filter`
* - Second
- 1 second
- ``s``, ``sec``, ``secs``, ``second``, ``seconds``
- - ``to_seconds``
+ - :ansplugin:`community.general.to_seconds#filter`
* - Minute
- 60 seconds
- ``m``, ``min``, ``mins``, ``minute``, ``minutes``
- - ``to_minutes``
+ - :ansplugin:`community.general.to_minutes#filter`
* - Hour
- 60*60 seconds
- ``h``, ``hour``, ``hours``
- - ``to_hours``
+ - :ansplugin:`community.general.to_hours#filter`
* - Day
- 24*60*60 seconds
- ``d``, ``day``, ``days``
- - ``to_days``
+ - :ansplugin:`community.general.to_days#filter`
* - Week
- 7*24*60*60 seconds
- ``w``, ``week``, ``weeks``
- - ``to_weeks``
+ - :ansplugin:`community.general.to_weeks#filter`
* - Month
- 30*24*60*60 seconds
- ``mo``, ``month``, ``months``
- - ``to_months``
+ - :ansplugin:`community.general.to_months#filter`
* - Year
- 365*24*60*60 seconds
- ``y``, ``year``, ``years``
- - ``to_years``
+ - :ansplugin:`community.general.to_years#filter`
-Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to ``to_time_unit`` and to all shorthand filters.
+Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to :ansplugin:`community.general.to_time_unit#filter` and to all shorthand filters.
.. code-block:: yaml+jinja
diff --git a/docs/docsite/rst/filter_guide_working_with_unicode.rst b/docs/docsite/rst/filter_guide_working_with_unicode.rst
index 2e5a67f8fa..e75b0f871b 100644
--- a/docs/docsite/rst/filter_guide_working_with_unicode.rst
+++ b/docs/docsite/rst/filter_guide_working_with_unicode.rst
@@ -6,9 +6,9 @@
Working with Unicode
---------------------
-`Unicode `_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this ``Unicode`` defines `normalization forms `_ which avoid these distinctions by choosing a unique character sequence for a given visual representation.
+`Unicode `_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this, Unicode defines `normalization forms `_ which avoid these distinctions by choosing a unique character sequence for a given visual representation.
-You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks.
+You can use the :ansplugin:`community.general.unicode_normalize filter ` to normalize Unicode strings within your playbooks.
.. code-block:: yaml+jinja
@@ -28,7 +28,7 @@ This produces:
"msg": true
}
-The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string.
+The :ansplugin:`community.general.unicode_normalize filter ` accepts a keyword argument :ansopt:`community.general.unicode_normalize#filter:form` to select the Unicode form used to normalize the input string.
:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference `_ for more information.
diff --git a/docs/docsite/rst/filter_guide_working_with_versions.rst b/docs/docsite/rst/filter_guide_working_with_versions.rst
index 2488427b73..055bbcd217 100644
--- a/docs/docsite/rst/filter_guide_working_with_versions.rst
+++ b/docs/docsite/rst/filter_guide_working_with_versions.rst
@@ -6,7 +6,7 @@
Working with versions
---------------------
-If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter:
+If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the :ansplugin:`community.general.version_sort filter `:
.. code-block:: yaml+jinja
diff --git a/docs/docsite/rst/guide_alicloud.rst b/docs/docsite/rst/guide_alicloud.rst
new file mode 100644
index 0000000000..b5ce2c063c
--- /dev/null
+++ b/docs/docsite/rst/guide_alicloud.rst
@@ -0,0 +1,96 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_alicloud:
+
+Alibaba Cloud Compute Services Guide
+====================================
+
+Introduction
+````````````
+
+The community.general collection contains several modules for controlling and managing Alibaba Cloud Compute Services (Alicloud). This guide
+explains how to use the Alicloud Ansible modules together.
+
+All Alicloud modules require ``footmark`` - install it on your control machine with ``pip install footmark``.
+
+Cloud modules, including Alicloud modules, are usually executed on your local machine (the control machine) with ``connection: local``, rather than on remote machines defined in your hosts.
+
+Normally, you'll use the following pattern for plays that provision Alicloud resources:
+
+.. code-block:: yaml
+
+ - hosts: localhost
+ connection: local
+ vars:
+ - ...
+ tasks:
+ - ...
+
+Authentication
+``````````````
+
+You can specify your Alicloud authentication credentials (access key and secret key) by passing them as
+environment variables or by storing them in a vars file.
+
+To pass authentication credentials as environment variables:
+
+.. code-block:: console
+
+ export ALICLOUD_ACCESS_KEY='Alicloud123'
+ export ALICLOUD_SECRET_KEY='AlicloudSecret123'
+
+To store authentication credentials in a vars file, encrypt them with :ref:`Ansible Vault ` to keep them secure, then list them:
+
+.. code-block:: yaml
+
+ ---
+ alicloud_access_key: "--REMOVED--"
+ alicloud_secret_key: "--REMOVED--"
+
+Note that if you store your credentials in a vars file, you need to refer to them in each Alicloud module. For example:
+
+.. code-block:: yaml+jinja
+
+ - community.general.ali_instance:
+ alicloud_access_key: "{{ alicloud_access_key }}"
+ alicloud_secret_key: "{{ alicloud_secret_key }}"
+ image_id: "..."
+
+Provisioning
+````````````
+
+Alicloud modules create Alicloud ECS instances (:ansplugin:`community.general.ali_instance#module`) and retrieve information on these (:ansplugin:`community.general.ali_instance_info#module`).
+
+You can use the ``count`` parameter to control the number of resources you create or terminate. For example, if you want exactly 5 instances tagged ``NewECS``, set the ``count`` of instances to 5 and the ``count_tag`` to ``NewECS``, as shown in the last task of the example playbook below. If there are no instances with the tag ``NewECS``, the task creates 5 new instances. If there are 2 instances with that tag, the task creates 3 more. If there are 8 instances with that tag, the task terminates 3 of those instances.
+
+If you do not specify a ``count_tag``, the task creates the number of instances you specify in ``count`` with the ``instance_name`` you provide.
+
+.. code-block:: yaml+jinja
+
+ # alicloud_setup.yml
+
+ - hosts: localhost
+ connection: local
+
+ tasks:
+ - name: Create a set of instances
+ community.general.ali_instance:
+ instance_type: ecs.n4.small
+ image_id: "{{ ami_id }}"
+ instance_name: "My-new-instance"
+ instance_tags:
+ Name: NewECS
+ Version: 0.0.1
+ count: 5
+ count_tag:
+ Name: NewECS
+ allocate_public_ip: true
+ max_bandwidth_out: 50
+ register: create_instance
+
+In the example playbook above, data about the instances created by this playbook is saved in the variable defined by the ``register`` keyword in the task.
+
+Each Alicloud module offers a variety of parameter options. Not all options are demonstrated in the above example. See each individual module for further details and examples.
diff --git a/docs/docsite/rst/guide_cmdrunner.rst b/docs/docsite/rst/guide_cmdrunner.rst
new file mode 100644
index 0000000000..c1514ee340
--- /dev/null
+++ b/docs/docsite/rst/guide_cmdrunner.rst
@@ -0,0 +1,529 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_cmdrunner:
+
+
+Command Runner guide
+====================
+
+
+Introduction
+^^^^^^^^^^^^
+
+The ``ansible_collections.community.general.plugins.module_utils.cmd_runner`` module util provides the
+``CmdRunner`` class to help execute external commands. The class is a wrapper around
+the standard ``AnsibleModule.run_command()`` method, handling command arguments, localization setting,
+output processing, check mode, and other features.
+
+It is even more useful when one command is used in multiple modules, so that you can define all options
+in a module util file, and each module uses the same runner with different arguments.
+
+For the sake of clarity, throughout this guide, unless otherwise specified, we use the term *option* when referring to
+Ansible module options, and the term *argument* when referring to the command line arguments for the external command.
+
+
+Quickstart
+""""""""""
+
+``CmdRunner`` defines a command and a set of coded instructions on how to format
+the command-line arguments, in which specific order, for a particular execution.
+It relies on ``ansible.module_utils.basic.AnsibleModule.run_command()`` to actually execute the command.
+There are other features, see more details throughout this document.
+
+To use ``CmdRunner`` you must start by creating an object. The example below is a simplified
+version of the actual code in :ansplugin:`community.general.ansible_galaxy_install#module`:
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+ runner = CmdRunner(
+ module,
+ command="ansible-galaxy",
+ arg_formats=dict(
+ type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]),
+ galaxy_cmd=cmd_runner_fmt.as_list(),
+ upgrade=cmd_runner_fmt.as_bool("--upgrade"),
+ requirements_file=cmd_runner_fmt.as_opt_val('-r'),
+ dest=cmd_runner_fmt.as_opt_val('-p'),
+ force=cmd_runner_fmt.as_bool("--force"),
+ no_deps=cmd_runner_fmt.as_bool("--no-deps"),
+ version=cmd_runner_fmt.as_fixed("--version"),
+ name=cmd_runner_fmt.as_list(),
+ )
+ )
+
+This is meant to be done once, then every time you need to execute the command you create a context and pass values as needed:
+
+.. code-block:: python
+
+ # Run the command with these arguments, when values exist for them
+ with runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx:
+ ctx.run(galaxy_cmd="install", upgrade=upgrade)
+
+ # version is fixed, requires no value
+ with runner("version") as ctx:
+ dummy, stdout, dummy = ctx.run()
+
+ # passes arg 'data' to AnsibleModule.run_command()
+ with runner("type name", data=stdin_data) as ctx:
+ dummy, stdout, dummy = ctx.run()
+
+ # Another way of expressing it
+ dummy, stdout, dummy = runner("version").run()
+
+Note that you can pass values for the arguments when calling ``run()``, otherwise ``CmdRunner``
+uses the module options with the exact same names to provide values for the runner arguments.
+If no value is passed and no module option is found for the name specified, then an exception is raised, unless
+the argument uses ``cmd_runner_fmt.as_fixed`` as its format function, like ``version`` in the example above.
+See more about it below.
+
+In the first example, values of ``type``, ``force``, ``no_deps`` and others
+are taken straight from the module, whilst ``galaxy_cmd`` and ``upgrade`` are
+passed explicitly.
+
+.. note::
+
+ It is not possible to automatically retrieve values of suboptions.
+
+That generates a resulting command line similar to (example taken from the
+output of an integration test):
+
+.. code-block:: python
+
+ [
+ "/bin/ansible-galaxy",
+ "collection",
+ "install",
+ "--upgrade",
+ "-p",
+ "",
+ "netbox.netbox",
+ ]
+
+
+Argument formats
+^^^^^^^^^^^^^^^^
+
+As seen in the example, ``CmdRunner`` expects a parameter named ``arg_formats``
+defining how to format each CLI named argument.
+An "argument format" is nothing but a function to transform the value of a variable
+into something formatted for the command line.
+
+
+Argument format function
+""""""""""""""""""""""""
+
+An ``arg_format`` function is defined in a form similar to:
+
+.. code-block:: python
+
+ def func(value):
+ return ["--some-param-name", value]
+
+The parameter ``value`` can be of any type - although there are convenience
+mechanisms to help handling sequence and mapping objects.
+
+The result is expected to be of the type ``Sequence[str]`` type (most commonly
+``list[str]`` or ``tuple[str]``), otherwise it is considered to be a ``str``,
+and it is coerced into ``list[str]``.
+This resulting sequence of strings is added to the command line when that
+argument is actually used.
+
+For example, if ``func`` returns:
+
+- ``["nee", 2, "shrubberries"]``, the command line adds arguments ``"nee" "2" "shrubberries"``.
+- ``2 == 2``, the command line adds argument ``True``.
+- ``None``, the command line adds argument ``None``.
+- ``[]``, the command line adds no command line argument for that particular argument.
+
+
+Convenience format methods
+""""""""""""""""""""""""""
+
+In the same module as ``CmdRunner`` there is a class ``cmd_runner_fmt`` which
+provides a set of convenience methods that return format functions for common cases.
+In the first block of code in the `Quickstart`_ section you can see the importing of
+that class:
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+The same example shows how to make use of some of them in the instantiation of the ``CmdRunner`` object.
+A description of each one of the convenience methods available and examples of how to use them is found below.
+In these descriptions ``value`` refers to the single parameter passed to the formatting function.
+
+- ``cmd_runner_fmt.as_list()``
+ This method does not receive any parameters; the function returns ``value`` as-is.
+
+ - Creation:
+ ``cmd_runner_fmt.as_list()``
+ - Examples:
+ +----------------------+---------------------+
+ | Value | Outcome |
+ +======================+=====================+
+ | ``["foo", "bar"]`` | ``["foo", "bar"]`` |
+ +----------------------+---------------------+
+ | ``"foobar"`` | ``["foobar"]`` |
+ +----------------------+---------------------+
+
+- ``cmd_runner_fmt.as_bool()``
+ This method receives two different parameters: ``args_true`` and ``args_false``, the latter being optional.
+ If the boolean evaluation of ``value`` is ``True``, the format function returns ``args_true``.
+ If the boolean evaluation is ``False``, then the function returns ``args_false`` if it was provided, or ``[]`` otherwise.
+
+ - Creation (one arg):
+ ``cmd_runner_fmt.as_bool("--force")``
+ - Examples:
+ +------------+--------------------+
+ | Value | Outcome |
+ +============+====================+
+ | ``True`` | ``["--force"]`` |
+ +------------+--------------------+
+ | ``False`` | ``[]`` |
+ +------------+--------------------+
+ - Creation (two args, ``None`` treated as ``False``):
+ ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it")``
+ - Examples:
+ +------------+----------------------+
+ | Value | Outcome |
+ +============+======================+
+ | ``True`` | ``["--relax"]`` |
+ +------------+----------------------+
+ | ``False`` | ``["--dont-do-it"]`` |
+ +------------+----------------------+
+ | | ``["--dont-do-it"]`` |
+ +------------+----------------------+
+ - Creation (two args, ``None`` is ignored):
+ ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it", ignore_none=True)``
+ - Examples:
+ +------------+----------------------+
+ | Value | Outcome |
+ +============+======================+
+ | ``True`` | ``["--relax"]`` |
+ +------------+----------------------+
+ | ``False`` | ``["--dont-do-it"]`` |
+ +------------+----------------------+
+ | | ``[]`` |
+ +------------+----------------------+
+
+- ``cmd_runner_fmt.as_bool_not()``
+ This method receives one parameter, which is returned by the function when the boolean evaluation
+ of ``value`` is ``False``.
+
+ - Creation:
+ ``cmd_runner_fmt.as_bool_not("--no-deps")``
+ - Examples:
+ +-------------+---------------------+
+ | Value | Outcome |
+ +=============+=====================+
+ | ``True`` | ``[]`` |
+ +-------------+---------------------+
+ | ``False`` | ``["--no-deps"]`` |
+ +-------------+---------------------+
+
+- ``cmd_runner_fmt.as_optval()``
+ This method receives one parameter ``arg``, the function returns the string concatenation
+ of ``arg`` and ``value``.
+
+ - Creation:
+ ``cmd_runner_fmt.as_optval("-i")``
+ - Examples:
+ +---------------+---------------------+
+ | Value | Outcome |
+ +===============+=====================+
+ | ``3`` | ``["-i3"]`` |
+ +---------------+---------------------+
+ | ``foobar`` | ``["-ifoobar"]`` |
+ +---------------+---------------------+
+
+- ``cmd_runner_fmt.as_opt_val()``
+ This method receives one parameter ``arg``, the function returns ``[arg, value]``.
+
+ - Creation:
+ ``cmd_runner_fmt.as_opt_val("--name")``
+ - Examples:
+ +--------------+--------------------------+
+ | Value | Outcome |
+ +==============+==========================+
+ | ``abc`` | ``["--name", "abc"]`` |
+ +--------------+--------------------------+
+
+- ``cmd_runner_fmt.as_opt_eq_val()``
+ This method receives one parameter ``arg``, the function returns the string of the form
+ ``{arg}={value}``.
+
+ - Creation:
+ ``cmd_runner_fmt.as_opt_eq_val("--num-cpus")``
+ - Examples:
+ +------------+-------------------------+
+ | Value | Outcome |
+ +============+=========================+
+ | ``10`` | ``["--num-cpus=10"]`` |
+ +------------+-------------------------+
+
+- ``cmd_runner_fmt.as_fixed()``
+ This method defines one or more fixed arguments that are returned by the generated function
+ regardless of whether ``value`` is passed to it or not.
+
+ This method accepts these arguments in one of three forms:
+
+ * one scalar parameter ``arg``, which will be returned as ``[arg]`` by the function, or
+ * one sequence parameter, such as a list, ``arg``, which will be returned by the function as-is, or
+ * multiple parameters ``args``, which will be returned as ``args`` directly by the function.
+
+ See the examples below for each one of those forms. Note that the generated function expects no ``value`` - if one
+ is provided then it is ignored.
+
+ - Creation (one scalar argument):
+ * ``cmd_runner_fmt.as_fixed("--version")``
+ - Examples:
+ +---------+--------------------------------------+
+ | Value | Outcome |
+ +=========+======================================+
+ | | * ``["--version"]`` |
+ +---------+--------------------------------------+
+ | 57 | * ``["--version"]`` |
+ +---------+--------------------------------------+
+
+ - Creation (one sequence argument):
+ * ``cmd_runner_fmt.as_fixed(["--list", "--json"])``
+ - Examples:
+ +---------+--------------------------------------+
+ | Value | Outcome |
+ +=========+======================================+
+ | | * ``["--list", "--json"]`` |
+ +---------+--------------------------------------+
+ | True | * ``["--list", "--json"]`` |
+ +---------+--------------------------------------+
+
+ - Creation (multiple arguments):
+ * ``cmd_runner_fmt.as_fixed("--one", "--two", "--three")``
+ - Examples:
+ +---------+--------------------------------------+
+ | Value | Outcome |
+ +=========+======================================+
+ | | * ``["--one", "--two", "--three"]`` |
+ +---------+--------------------------------------+
+ | False | * ``["--one", "--two", "--three"]`` |
+ +---------+--------------------------------------+
+
+ - Note:
+ This is the only special case in which a value can be missing for the formatting function.
+ The first example here comes from the code in `Quickstart`_.
+ In that case, the module has code to determine the command's version so that it can assert compatibility.
+ There is no *value* to be passed for that CLI argument.
+
+- ``cmd_runner_fmt.as_map()``
+ This method receives one parameter ``arg`` which must be a dictionary, and an optional parameter ``default``.
+ The function returns the evaluation of ``arg[value]``.
+ If ``value not in arg``, then it returns ``default`` if defined, otherwise ``[]``.
+
+ - Creation:
+ ``cmd_runner_fmt.as_map(dict(a=1, b=2, c=3), default=42)``
+ - Examples:
+ +---------------------+---------------+
+ | Value | Outcome |
+ +=====================+===============+
+ | ``"b"`` | ``["2"]`` |
+ +---------------------+---------------+
+ | ``"yabadabadoo"`` | ``["42"]`` |
+ +---------------------+---------------+
+
+ - Note:
+ If ``default`` is not specified, invalid values return an empty list, meaning they are silently ignored.
+
+- ``cmd_runner_fmt.as_func()``
+ This method receives one parameter ``arg`` which itself is a format function and it must abide by the rules described above.
+
+ - Creation:
+ ``cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)])``
+ - Note:
+ The outcome for that depends entirely on the function provided by the developer.
+
+
+Other features for argument formatting
+""""""""""""""""""""""""""""""""""""""
+
+Some additional features are available as decorators:
+
+- ``cmd_runner_fmt.unpack_args()``
+ This decorator unpacks the incoming ``value`` as a list of elements.
+
+ For example, in ``ansible_collections.community.general.plugins.module_utils.puppet``, it is used as:
+
+ .. code-block:: python
+
+ @cmd_runner_fmt.unpack_args
+ def execute_func(execute, manifest):
+ if execute:
+ return ["--execute", execute]
+ else:
+ return [manifest]
+
+ runner = CmdRunner(
+ module,
+ command=_prepare_base_cmd(),
+ path_prefix=_PUPPET_PATH_PREFIX,
+ arg_formats=dict(
+ # ...
+ _execute=cmd_runner_fmt.as_func(execute_func),
+ # ...
+ ),
+ )
+
+ Then, in :ansplugin:`community.general.puppet#module` it is put to use with:
+
+ .. code-block:: python
+
+ with runner(args_order) as ctx:
+ rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']])
+
+- ``cmd_runner_fmt.unpack_kwargs()``
+ Conversely, this decorator unpacks the incoming ``value`` as a ``dict``-like object.
+
+- ``cmd_runner_fmt.stack()``
+ This decorator assumes ``value`` is a sequence and concatenates the output
+ of the wrapped function applied to each element of the sequence.
+
+ For example, in :ansplugin:`community.general.django_check#module`, the argument format for ``database``
+ is defined as:
+
+ .. code-block:: python
+
+ arg_formats = dict(
+ # ...
+ database=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"),
+ # ...
+ )
+
+ When receiving a list ``["abc", "def"]``, the output is:
+
+ .. code-block:: python
+
+ ["--database", "abc", "--database", "def"]
+
+
+Command Runner
+^^^^^^^^^^^^^^
+
+Settings that can be passed to the ``CmdRunner`` constructor are:
+
+- ``module: AnsibleModule``
+ Module instance. Mandatory parameter.
+- ``command: str | list[str]``
+ Command to be executed. It can be a single string, the executable name, or a list
+ of strings containing the executable name as the first element and, optionally, fixed parameters.
+ Those parameters are used in all executions of the runner.
+ The *executable* pointed by this parameter (whether itself when ``str`` or its first element when ``list``) is
+ processed using ``AnsibleModule.get_bin_path()`` *unless* it is an absolute path or contains the character ``/``.
+- ``arg_formats: dict``
+ Mapping of argument names to formatting functions.
+- ``default_args_order: str``
+ As the name suggests, a default ordering for the arguments. When
+ this is passed, the context can be created without specifying ``args_order``. Defaults to ``()``.
+- ``check_rc: bool``
+ When ``True``, if the return code from the command is not zero, the module exits
+ with an error. Defaults to ``False``.
+- ``path_prefix: list[str]``
+ If the command being executed is installed in a non-standard directory path,
+ additional paths might be provided to search for the executable. Defaults to ``None``.
+- ``environ_update: dict``
+ Pass additional environment variables to be set during the command execution.
+ Defaults to ``None``.
+- ``force_lang: str``
+ It is usually important to force the locale to one specific value, so that responses are consistent and, therefore, parseable.
+ Please note that using this option (which is enabled by default) overwrites the environment variables ``LANGUAGE`` and ``LC_ALL``.
+ To disable this mechanism, set this parameter to ``None``.
+ In community.general 9.1.0 a special value ``auto`` was introduced for this parameter, with the effect
+ that ``CmdRunner`` then tries to determine the best parseable locale for the runtime.
+ It should become the default value in the future, but for the time being the default value is ``C``.
+
+When creating a context, the additional settings that can be passed to the call are:
+
+- ``args_order: str``
+ Establishes the order in which the arguments are rendered in the command line.
+ This parameter is mandatory unless ``default_args_order`` was provided to the runner instance.
+- ``output_process: func``
+ Function to transform the output of the executable into different values or formats.
+ See examples in section below.
+- ``check_mode_skip: bool``
+ Whether to skip the actual execution of the command when the module is in check mode.
+ Defaults to ``False``.
+- ``check_mode_return: any``
+ If ``check_mode_skip=True``, then return this value instead.
+- valid named arguments to ``AnsibleModule.run_command()``
+ Other than ``args``, any valid argument to ``run_command()`` can be passed when setting up the run context.
+ For example, ``data`` can be used to send information to the command's standard input.
+ Or ``cwd`` can be used to run the command inside a specific working directory.
+
+Additionally, any other valid parameters for ``AnsibleModule.run_command()`` may be passed, but unexpected behavior
+might occur if redefining options already present in the runner or its context creation. Use with caution.
+
+
+Processing results
+^^^^^^^^^^^^^^^^^^
+
+As mentioned, ``CmdRunner`` uses ``AnsibleModule.run_command()`` to execute the external command,
+and it passes the return value from that method back to the caller. That means that,
+by default, the result is going to be a tuple ``(rc, stdout, stderr)``.
+
+If you need to transform or process that output, you can pass a function to the context,
+as the ``output_process`` parameter. It must be a function like:
+
+.. code-block:: python
+
+ def process(rc, stdout, stderr):
+ # do some magic
+ return processed_value # whatever that is
+
+In that case, the return of ``run()`` is the ``processed_value`` returned by the function.
+
+
+PythonRunner
+^^^^^^^^^^^^
+
+The ``PythonRunner`` class is a specialized version of ``CmdRunner``, geared towards the execution of
+Python scripts. It features two extra and mutually exclusive parameters ``python`` and ``venv`` in its constructor:
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner
+ from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt
+
+ runner = PythonRunner(
+ module,
+ command=["-m", "django"],
+ arg_formats=dict(...),
+ python="python",
+ venv="/path/to/some/venv",
+ )
+
+The default value for ``python`` is the string ``python``, and for ``venv`` it is ``None``.
+
+The command line produced by such a command with ``python="python3.12"`` is something like:
+
+.. code-block:: shell
+
+ /usr/bin/python3.12 -m django ...
+
+And the command line for ``venv="/work/venv"`` is like:
+
+.. code-block:: shell
+
+ /work/venv/bin/python -m django ...
+
+You may provide the value of the ``command`` argument as a string (in that case the string is used as a script name)
+or as a list, in which case the elements of the list must be valid arguments for the Python interpreter, as in the example above.
+See `Command line and environment <https://docs.python.org/3/using/cmdline.html>`_ for more details.
+
+If the parameter ``python`` is an absolute path, or contains directory separators, such as ``/``, then it is used
+as-is, otherwise the runtime ``PATH`` is searched for that command name.
+
+Other than that, everything else works as in ``CmdRunner``.
+
+.. versionadded:: 4.8.0
diff --git a/docs/docsite/rst/guide_deps.rst b/docs/docsite/rst/guide_deps.rst
new file mode 100644
index 0000000000..1a44051ee4
--- /dev/null
+++ b/docs/docsite/rst/guide_deps.rst
@@ -0,0 +1,75 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_deps:
+
+``deps`` Guide
+==============
+
+
+Using ``deps``
+^^^^^^^^^^^^^^
+
+The ``ansible_collections.community.general.plugins.module_utils.deps`` module util simplifies
+the importing of code as described in :ref:`Importing and using shared code `.
+Please notice that ``deps`` is meant to be used specifically with Ansible modules, and not other types of plugins.
+
+The same example from the Developer Guide would become:
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils import deps
+
+
+ with deps.declare("foo"):
+ import foo
+
+Then in ``main()``, just after the argspec (or anywhere in the code, for that matter), do
+
+.. code-block:: python
+
+ deps.validate(module) # assuming module is a valid AnsibleModule instance
+
+By default, ``deps`` will rely on ``ansible.module_utils.basic.missing_required_lib`` to generate
+a message about a failing import. That function accepts parameters ``reason`` and ``url``, and
+so does ``deps``:
+
+.. code-block:: python
+
+ with deps.declare("foo", reason="foo is needed to properly bar", url="https://foo.bar.io"):
+ import foo
+
+If you would rather write a custom message instead of using ``missing_required_lib`` then do:
+
+.. code-block:: python
+
+ with deps.declare("foo", msg="Custom msg explaining why foo is needed"):
+ import foo
+
+``deps`` allows for multiple dependencies to be declared:
+
+.. code-block:: python
+
+ with deps.declare("foo"):
+ import foo
+
+ with deps.declare("bar"):
+ import bar
+
+ with deps.declare("doe"):
+ import doe
+
+By default, ``deps.validate()`` will check on all the declared dependencies, but if so desired,
+they can be validated selectively by doing:
+
+.. code-block:: python
+
+ deps.validate(module, "foo") # only validates the "foo" dependency
+
+ deps.validate(module, "doe:bar") # only validates the "doe" and "bar" dependencies
+
+ deps.validate(module, "-doe:bar") # validates all dependencies except "doe" and "bar"
+
+.. versionadded:: 6.1.0
diff --git a/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 b/docs/docsite/rst/guide_iocage.rst
similarity index 55%
rename from docs/docsite/helper/lists_mergeby/examples_all.rst.j2
rename to docs/docsite/rst/guide_iocage.rst
index 95a0fafddc..67eb0e8a99 100644
--- a/docs/docsite/helper/lists_mergeby/examples_all.rst.j2
+++ b/docs/docsite/rst/guide_iocage.rst
@@ -3,11 +3,13 @@
GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
SPDX-License-Identifier: GPL-3.0-or-later
-{% for i in examples %}
-{{ i.label }}
+.. _ansible_collections.community.general.docsite.guide_iocage:
-.. code-block:: {{ i.lang }}
+************
+Iocage Guide
+************
- {{ lookup('file', i.file)|indent(2) }}
+.. toctree::
+ :maxdepth: 1
-{% endfor %}
+ guide_iocage_inventory
diff --git a/docs/docsite/rst/guide_iocage_inventory.rst b/docs/docsite/rst/guide_iocage_inventory.rst
new file mode 100644
index 0000000000..4a410c35db
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory.rst
@@ -0,0 +1,31 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory:
+
+community.general.iocage inventory plugin
+=========================================
+
+The inventory plugin :ansplugin:`community.general.iocage#inventory` gets the inventory hosts from the iocage jail manager.
+
+See:
+
+* `iocage - A FreeBSD Jail Manager <https://iocage.readthedocs.io/en/latest/>`_
+* `man iocage <https://man.freebsd.org/cgi/man.cgi?query=iocage>`_
+* `Jails and Containers <https://docs.freebsd.org/en/books/handbook/jails/>`_
+
+.. note::
+ The output of the examples is YAML formatted. See the option :ansopt:`ansible.builtin.default#callback:result_format`.
+
+.. toctree::
+ :caption: Table of Contents
+ :maxdepth: 1
+
+ guide_iocage_inventory_basics
+ guide_iocage_inventory_dhcp
+ guide_iocage_inventory_hooks
+ guide_iocage_inventory_properties
+ guide_iocage_inventory_tags
+ guide_iocage_inventory_aliases
diff --git a/docs/docsite/rst/guide_iocage_inventory_aliases.rst b/docs/docsite/rst/guide_iocage_inventory_aliases.rst
new file mode 100644
index 0000000000..431403d733
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_aliases.rst
@@ -0,0 +1,200 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_aliases:
+
+Aliases
+-------
+
+Quoting :ref:`inventory_aliases`:
+
+ The ``inventory_hostname`` is the unique identifier for a host in Ansible, this can be an IP or a hostname, but also just an 'alias' or short name for the host.
+
+As root at the iocage host, stop and destroy all jails:
+
+.. code-block:: console
+
+ shell> iocage stop ALL
+ * Stopping srv_1
+ + Executing prestop OK
+ + Stopping services OK
+ + Tearing down VNET OK
+ + Removing devfs_ruleset: 1000 OK
+ + Removing jail process OK
+ + Executing poststop OK
+ * Stopping srv_2
+ + Executing prestop OK
+ + Stopping services OK
+ + Tearing down VNET OK
+ + Removing devfs_ruleset: 1001 OK
+ + Removing jail process OK
+ + Executing poststop OK
+ * Stopping srv_3
+ + Executing prestop OK
+ + Stopping services OK
+ + Tearing down VNET OK
+ + Removing devfs_ruleset: 1002 OK
+ + Removing jail process OK
+ + Executing poststop OK
+ ansible_client is not running!
+
+ shell> iocage destroy -f srv_1 srv_2 srv_3
+ Destroying srv_1
+ Destroying srv_2
+ Destroying srv_3
+
+Create three VNET jails with a DHCP interface from the template *ansible_client*. Use the option ``--count``:
+
+.. code-block:: console
+
+ shell> iocage create --short --template ansible_client --count 3 bpf=1 dhcp=1 vnet=1
+ 1c11de2d successfully created!
+ 9d94cc9e successfully created!
+ 052b9557 successfully created!
+
+The names are random. Start the jails:
+
+.. code-block:: console
+
+ shell> iocage start ALL
+ No default gateway found for ipv6.
+ * Starting 052b9557
+ + Started OK
+ + Using devfs_ruleset: 1000 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.137/24
+ No default gateway found for ipv6.
+ * Starting 1c11de2d
+ + Started OK
+ + Using devfs_ruleset: 1001 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.146/24
+ No default gateway found for ipv6.
+ * Starting 9d94cc9e
+ + Started OK
+ + Using devfs_ruleset: 1002 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.115/24
+ Please convert back to a jail before trying to start ansible_client
+
+List the jails:
+
+.. code-block:: console
+
+ shell> iocage list -l
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+==========+======+=======+======+=================+====================+=====+================+==========+
+ | 207 | 052b9557 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.137 | - | ansible_client | no |
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 208 | 1c11de2d | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.146 | - | ansible_client | no |
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 209 | 9d94cc9e | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.115 | - | ansible_client | no |
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+Set notes. The tag *alias* will be used to create inventory aliases:
+
+.. code-block:: console
+
+ shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_1" 052b9557
+ notes: none -> vmm=iocage_02 project=foo alias=srv_1
+ shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_2" 1c11de2d
+ notes: none -> vmm=iocage_02 project=foo alias=srv_2
+ shell> iocage set notes="vmm=iocage_02 project=bar alias=srv_3" 9d94cc9e
+ notes: none -> vmm=iocage_02 project=bar alias=srv_3
+
+Update the inventory configuration. Set the option
+:ansopt:`community.general.iocage#inventory:inventory_hostname_tag` to :ansval:`alias`. This tag keeps the
+value of the alias. The option :ansopt:`community.general.iocage#inventory:get_properties` must be
+enabled. For example, ``hosts/02_iocage.yml`` contains:
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ get_properties: true
+ inventory_hostname_tag: alias
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+ iocage_tags: dict(iocage_properties.notes | split | map('split', '='))
+ keyed_groups:
+ - prefix: vmm
+ key: iocage_tags.vmm
+ - prefix: project
+ key: iocage_tags.project
+
+Display tags and groups. Create a playbook ``pb-test-groups.yml`` with the following content:
+
+.. code-block:: yaml+jinja
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - debug:
+ var: iocage_tags
+
+ - debug:
+ msg: |
+ {% for group in groups %}
+ {{ group }}: {{ groups[group] }}
+ {% endfor %}
+ run_once: true
+
+Run the playbook:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ iocage_tags:
+ alias: srv_1
+ project: foo
+ vmm: iocage_02
+ ok: [srv_2] =>
+ iocage_tags:
+ alias: srv_2
+ project: foo
+ vmm: iocage_02
+ ok: [srv_3] =>
+ iocage_tags:
+ alias: srv_3
+ project: bar
+ vmm: iocage_02
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ msg: |-
+ all: ['srv_1', 'srv_2', 'srv_3']
+ ungrouped: []
+ vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3']
+ project_foo: ['srv_1', 'srv_2']
+ project_bar: ['srv_3']
+
+ PLAY RECAP **********************************************************************************************************
+ srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
diff --git a/docs/docsite/rst/guide_iocage_inventory_basics.rst b/docs/docsite/rst/guide_iocage_inventory_basics.rst
new file mode 100644
index 0000000000..f198edc4f4
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_basics.rst
@@ -0,0 +1,128 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_basics:
+
+Basics
+------
+
+As root at the iocage host, create three VNET jails with a DHCP interface from the template
+*ansible_client*:
+
+.. code-block:: console
+
+ shell> iocage create --template ansible_client --name srv_1 bpf=1 dhcp=1 vnet=1
+ srv_1 successfully created!
+ shell> iocage create --template ansible_client --name srv_2 bpf=1 dhcp=1 vnet=1
+ srv_2 successfully created!
+ shell> iocage create --template ansible_client --name srv_3 bpf=1 dhcp=1 vnet=1
+ srv_3 successfully created!
+
+See: `Configuring a VNET Jail <https://iocage.readthedocs.io/en/latest/networking.html#configuring-a-vnet-jail>`_.
+
+As admin at the controller, list the jails:
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 iocage list -l
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +======+=======+======+=======+======+=================+====================+=====+================+==========+
+ | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+Create the inventory file ``hosts/02_iocage.yml``:
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+
+Display the inventory:
+
+.. code-block:: console
+
+ shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml
+ all:
+ children:
+ ungrouped:
+ hosts:
+ srv_1:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (not running)
+ iocage_ip6: '-'
+ iocage_jid: None
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: down
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_2:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (not running)
+ iocage_ip6: '-'
+ iocage_jid: None
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: down
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_3:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (not running)
+ iocage_ip6: '-'
+ iocage_jid: None
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: down
+ iocage_template: ansible_client
+ iocage_type: jail
+
+Optionally, create shared IP jails:
+
+.. code-block:: console
+
+ shell> iocage create --template ansible_client --name srv_1 ip4_addr="em0|10.1.0.101/24"
+ srv_1 successfully created!
+ shell> iocage create --template ansible_client --name srv_2 ip4_addr="em0|10.1.0.102/24"
+ srv_2 successfully created!
+ shell> iocage create --template ansible_client --name srv_3 ip4_addr="em0|10.1.0.103/24"
+ srv_3 successfully created!
+ shell> iocage list -l
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +======+=======+======+=======+======+=================+===================+=====+================+==========+
+ | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.101/24 | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+ | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.102/24 | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+ | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.103/24 | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+
+See: `Configuring a Shared IP Jail <https://iocage.readthedocs.io/en/latest/networking.html#configuring-a-shared-ip-jail>`_
+
+If iocage needs environment variable(s), use the option :ansopt:`community.general.iocage#inventory:env`. For example,
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
diff --git a/docs/docsite/rst/guide_iocage_inventory_dhcp.rst b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst
new file mode 100644
index 0000000000..3c37366ca6
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst
@@ -0,0 +1,175 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_dhcp:
+
+DHCP
+----
+
+As root at the iocage host, start the jails:
+
+.. code-block:: console
+
+ shell> iocage start ALL
+ No default gateway found for ipv6.
+ * Starting srv_1
+ + Started OK
+ + Using devfs_ruleset: 1000 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.183/24
+ No default gateway found for ipv6.
+ * Starting srv_2
+ + Started OK
+ + Using devfs_ruleset: 1001 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.204/24
+ No default gateway found for ipv6.
+ * Starting srv_3
+ + Started OK
+ + Using devfs_ruleset: 1002 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.169/24
+ Please convert back to a jail before trying to start ansible_client
+
+List the jails:
+
+.. code-block:: console
+
+ shell> iocage list -l
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+=======+======+=======+======+=================+====================+=====+================+==========+
+ | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+As admin at the controller, list the jails. The IP4 tab says "... address requires root":
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 iocage list -l
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+=======+======+=======+======+=================+=========================================+=====+================+==========+
+ | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+ | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+ | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+
+Use sudo if enabled:
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 sudo iocage list -l
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+=======+======+=======+======+=================+====================+=====+================+==========+
+ | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+Create the inventory file ``hosts/02_iocage.yml``. Use the option
+:ansopt:`community.general.iocage#inventory:sudo`:
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ sudo: true
+
+Display the inventory:
+
+.. code-block:: console
+
+ shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml
+ all:
+ children:
+ ungrouped:
+ hosts:
+ srv_1:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: 10.1.0.183
+ iocage_ip4_dict:
+ ip4:
+ - ifc: epair0b
+ ip: 10.1.0.183
+ mask: '-'
+ msg: ''
+ iocage_ip6: '-'
+ iocage_jid: '204'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_2:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: 10.1.0.204
+ iocage_ip4_dict:
+ ip4:
+ - ifc: epair0b
+ ip: 10.1.0.204
+ mask: '-'
+ msg: ''
+ iocage_ip6: '-'
+ iocage_jid: '205'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_3:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: 10.1.0.169
+ iocage_ip4_dict:
+ ip4:
+ - ifc: epair0b
+ ip: 10.1.0.169
+ mask: '-'
+ msg: ''
+ iocage_ip6: '-'
+ iocage_jid: '206'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+
+Note: If the option :ansopt:`community.general.iocage#inventory:env` is used and :ansopt:`community.general.iocage#inventory:sudo` is enabled, also enable :ansopt:`community.general.iocage#inventory:sudo_preserve_env`. For example,
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
+ sudo: true
+ sudo_preserve_env: true
+
+In this case, make sure the sudo tag ``SETENV`` is used:
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 sudo cat /usr/local/etc/sudoers | grep admin
+ admin ALL=(ALL) NOPASSWD:SETENV: ALL
diff --git a/docs/docsite/rst/guide_iocage_inventory_hooks.rst b/docs/docsite/rst/guide_iocage_inventory_hooks.rst
new file mode 100644
index 0000000000..45364fc798
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_hooks.rst
@@ -0,0 +1,187 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_hooks:
+
+Hooks
+-----
+
+The iocage utility internally opens a console to a jail to get the jail's DHCP address. This
+requires root. If you run the command ``iocage list -l`` as unprivileged user, you'll see the
+message ``DHCP (running -- address requires root)``. If you are not granted the root privilege, use
+``/etc/dhclient-exit-hooks``. For example, in the jail *srv_1*, create the file
+``/zroot/iocage/jails/srv_1/root/etc/dhclient-exit-hooks``
+
+.. code-block:: shell
+
+ case "$reason" in
+ "BOUND"|"REBIND"|"REBOOT"|"RENEW")
+ echo $new_ip_address > /var/db/dhclient-hook.address.$interface
+ ;;
+ esac
+
+where ``/zroot/iocage`` is the activated pool.
+
+.. code-block:: console
+
+ shell> zfs list | grep /zroot/iocage
+ zroot/iocage 4.69G 446G 5.08M /zroot/iocage
+ zroot/iocage/download 927M 446G 384K /zroot/iocage/download
+ zroot/iocage/download/14.1-RELEASE 465M 446G 465M /zroot/iocage/download/14.1-RELEASE
+ zroot/iocage/download/14.2-RELEASE 462M 446G 462M /zroot/iocage/download/14.2-RELEASE
+ zroot/iocage/images 384K 446G 384K /zroot/iocage/images
+ zroot/iocage/jails 189M 446G 480K /zroot/iocage/jails
+ zroot/iocage/jails/srv_1 62.9M 446G 464K /zroot/iocage/jails/srv_1
+ zroot/iocage/jails/srv_1/root 62.4M 446G 3.53G /zroot/iocage/jails/srv_1/root
+ zroot/iocage/jails/srv_2 62.8M 446G 464K /zroot/iocage/jails/srv_2
+ zroot/iocage/jails/srv_2/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_2/root
+ zroot/iocage/jails/srv_3 62.8M 446G 464K /zroot/iocage/jails/srv_3
+ zroot/iocage/jails/srv_3/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_3/root
+ zroot/iocage/log 688K 446G 688K /zroot/iocage/log
+ zroot/iocage/releases 2.93G 446G 384K /zroot/iocage/releases
+ zroot/iocage/releases/14.2-RELEASE 2.93G 446G 384K /zroot/iocage/releases/14.2-RELEASE
+ zroot/iocage/releases/14.2-RELEASE/root 2.93G 446G 2.88G /zroot/iocage/releases/14.2-RELEASE/root
+ zroot/iocage/templates 682M 446G 416K /zroot/iocage/templates
+ zroot/iocage/templates/ansible_client 681M 446G 432K /zroot/iocage/templates/ansible_client
+ zroot/iocage/templates/ansible_client/root 681M 446G 3.53G /zroot/iocage/templates/ansible_client/root
+
+See: `man dhclient-script <https://man.freebsd.org/cgi/man.cgi?query=dhclient-script&sektion=8>`_
+
+Create the inventory configuration. Use the option :ansopt:`community.general.iocage#inventory:hooks_results` instead of :ansopt:`community.general.iocage#inventory:sudo`:
+
+.. code-block:: console
+
+ shell> cat hosts/02_iocage.yml
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+
+.. note::
+
+ The option :ansopt:`community.general.iocage#inventory:hooks_results` expects the poolname to be mounted to ``/poolname``. For example, if you
+ activate the pool iocage, this plugin expects to find the :ansopt:`community.general.iocage#inventory:hooks_results` items in the path
+ /iocage/iocage/jails/<name>/root. If you mount the poolname to a different path, the easiest
+ remedy is to create a symlink.
+
+As admin at the controller, display the inventory:
+
+.. code-block:: console
+
+ shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml
+ all:
+ children:
+ ungrouped:
+ hosts:
+ srv_1:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_hooks:
+ - 10.1.0.183
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (running -- address requires root)
+ iocage_ip6: '-'
+ iocage_jid: '204'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_2:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_hooks:
+ - 10.1.0.204
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (running -- address requires root)
+ iocage_ip6: '-'
+ iocage_jid: '205'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_3:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_hooks:
+ - 10.1.0.169
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (running -- address requires root)
+ iocage_ip6: '-'
+ iocage_jid: '206'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+
+Compose the variable ``ansible_host``. For example, ``hosts/02_iocage.yml`` could look like:
+
+.. code-block:: yaml+jinja
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+
+Test the jails. Create a playbook ``pb-test-uname.yml``:
+
+.. code-block:: yaml
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - command: uname -a
+ register: out
+
+ - debug:
+ var: out.stdout
+
+See: :ref:`working_with_bsd`
+
+Run the playbook:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml pb-test-uname.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [command] ******************************************************************************************************
+ changed: [srv_3]
+ changed: [srv_1]
+ changed: [srv_2]
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ out.stdout: FreeBSD srv-1 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+ ok: [srv_3] =>
+ out.stdout: FreeBSD srv-3 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+ ok: [srv_2] =>
+ out.stdout: FreeBSD srv-2 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+
+ PLAY RECAP **********************************************************************************************************
+ srv_1 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_2 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_3 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+
+Note: This playbook and the inventory configuration works also for the *Shared IP Jails*.
diff --git a/docs/docsite/rst/guide_iocage_inventory_properties.rst b/docs/docsite/rst/guide_iocage_inventory_properties.rst
new file mode 100644
index 0000000000..d044f2e7f2
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_properties.rst
@@ -0,0 +1,201 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_properties:
+
+Properties
+----------
+
+Optionally, in the inventory file ``hosts/02_iocage.yml``, get the iocage properties. Enable
+:ansopt:`community.general.iocage#inventory:get_properties`:
+
+.. code-block:: yaml+jinja
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ get_properties: true
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+
+Display the properties. Create the playbook ``pb-test-properties.yml``:
+
+.. code-block:: yaml
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - debug:
+ var: iocage_properties
+
+Run the playbook. Limit the inventory to *srv_3*:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml -l srv_3 pb-test-properties.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_3] =>
+ iocage_properties:
+ CONFIG_VERSION: '33'
+ allow_chflags: '0'
+ allow_mlock: '0'
+ allow_mount: '1'
+ allow_mount_devfs: '0'
+ allow_mount_fdescfs: '0'
+ allow_mount_fusefs: '0'
+ allow_mount_linprocfs: '0'
+ allow_mount_linsysfs: '0'
+ allow_mount_nullfs: '0'
+ allow_mount_procfs: '0'
+ allow_mount_tmpfs: '0'
+ allow_mount_zfs: '0'
+ allow_nfsd: '0'
+ allow_quotas: '0'
+ allow_raw_sockets: '0'
+ allow_set_hostname: '1'
+ allow_socket_af: '0'
+ allow_sysvipc: '0'
+ allow_tun: '0'
+ allow_vmm: '0'
+ assign_localhost: '0'
+ available: readonly
+ basejail: '0'
+ boot: '0'
+ bpf: '1'
+ children_max: '0'
+ cloned_release: 14.2-RELEASE
+ comment: none
+ compression: 'on'
+ compressratio: readonly
+ coredumpsize: 'off'
+ count: '1'
+ cpuset: 'off'
+ cputime: 'off'
+ datasize: 'off'
+ dedup: 'off'
+ defaultrouter: auto
+ defaultrouter6: auto
+ depends: none
+ devfs_ruleset: '4'
+ dhcp: '1'
+ enforce_statfs: '2'
+ exec_clean: '1'
+ exec_created: /usr/bin/true
+ exec_fib: '0'
+ exec_jail_user: root
+ exec_poststart: /usr/bin/true
+ exec_poststop: /usr/bin/true
+ exec_prestart: /usr/bin/true
+ exec_prestop: /usr/bin/true
+ exec_start: /bin/sh /etc/rc
+ exec_stop: /bin/sh /etc/rc.shutdown
+ exec_system_jail_user: '0'
+ exec_system_user: root
+ exec_timeout: '60'
+ host_domainname: none
+ host_hostname: srv-3
+ host_hostuuid: srv_3
+ host_time: '1'
+ hostid: ea2ba7d1-4fcd-f13f-82e4-8b32c0a03403
+ hostid_strict_check: '0'
+ interfaces: vnet0:bridge0
+ ip4: new
+ ip4_addr: none
+ ip4_saddrsel: '1'
+ ip6: new
+ ip6_addr: none
+ ip6_saddrsel: '1'
+ ip_hostname: '0'
+ jail_zfs: '0'
+ jail_zfs_dataset: iocage/jails/srv_3/data
+ jail_zfs_mountpoint: none
+ last_started: '2025-06-11 04:29:23'
+ localhost_ip: none
+ login_flags: -f root
+ mac_prefix: 02a098
+ maxproc: 'off'
+ memorylocked: 'off'
+ memoryuse: 'off'
+ min_dyn_devfs_ruleset: '1000'
+ mount_devfs: '1'
+ mount_fdescfs: '1'
+ mount_linprocfs: '0'
+ mount_procfs: '0'
+ mountpoint: readonly
+ msgqqueued: 'off'
+ msgqsize: 'off'
+ nat: '0'
+ nat_backend: ipfw
+ nat_forwards: none
+ nat_interface: none
+ nat_prefix: '172.16'
+ nmsgq: 'off'
+ notes: none
+ nsem: 'off'
+ nsemop: 'off'
+ nshm: 'off'
+ nthr: 'off'
+ openfiles: 'off'
+ origin: readonly
+ owner: root
+ pcpu: 'off'
+ plugin_name: none
+ plugin_repository: none
+ priority: '99'
+ pseudoterminals: 'off'
+ quota: none
+ readbps: 'off'
+ readiops: 'off'
+ release: 14.2-RELEASE-p3
+ reservation: none
+ resolver: /etc/resolv.conf
+ rlimits: 'off'
+ rtsold: '0'
+ securelevel: '2'
+ shmsize: 'off'
+ source_template: ansible_client
+ stacksize: 'off'
+ state: up
+ stop_timeout: '30'
+ swapuse: 'off'
+ sync_state: none
+ sync_target: none
+ sync_tgt_zpool: none
+ sysvmsg: new
+ sysvsem: new
+ sysvshm: new
+ template: '0'
+ type: jail
+ used: readonly
+ vmemoryuse: 'off'
+ vnet: '1'
+ vnet0_mac: 02a0983da05d 02a0983da05e
+ vnet0_mtu: auto
+ vnet1_mac: none
+ vnet1_mtu: auto
+ vnet2_mac: none
+ vnet2_mtu: auto
+ vnet3_mac: none
+ vnet3_mtu: auto
+ vnet_default_interface: auto
+ vnet_default_mtu: '1500'
+ vnet_interfaces: none
+ wallclock: 'off'
+ writebps: 'off'
+ writeiops: 'off'
+
+ PLAY RECAP **********************************************************************************************************
+ srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
diff --git a/docs/docsite/rst/guide_iocage_inventory_tags.rst b/docs/docsite/rst/guide_iocage_inventory_tags.rst
new file mode 100644
index 0000000000..8adf641073
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_tags.rst
@@ -0,0 +1,117 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_tags:
+
+Tags
+----
+
+Quoting `man iocage <https://man.freebsd.org/cgi/man.cgi?query=iocage>`_
+
+.. code-block:: text
+
+ PROPERTIES
+ ...
+ notes="any string"
+ Custom notes for miscellaneous tagging.
+ Default: none
+ Source: local
+
+We will use the format ``notes="tag1=value1 tag2=value2 ..."``.
+
+.. note::
+
+ The iocage tags have nothing to do with the :ref:`tags`.
+
+As root at the iocage host, set notes. For example,
+
+.. code-block:: console
+
+ shell> iocage set notes="vmm=iocage_02 project=foo" srv_1
+ notes: none -> vmm=iocage_02 project=foo
+ shell> iocage set notes="vmm=iocage_02 project=foo" srv_2
+ notes: none -> vmm=iocage_02 project=foo
+ shell> iocage set notes="vmm=iocage_02 project=bar" srv_3
+ notes: none -> vmm=iocage_02 project=bar
+
+Update the inventory configuration. Compose a dictionary *iocage_tags* and create groups. The option
+:ansopt:`community.general.iocage#inventory:get_properties` must be enabled.
+For example, ``hosts/02_iocage.yml`` could look like:
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ get_properties: true
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+ iocage_tags: dict(iocage_properties.notes | split | map('split', '='))
+ keyed_groups:
+ - prefix: vmm
+ key: iocage_tags.vmm
+ - prefix: project
+ key: iocage_tags.project
+
+Display tags and groups. Create a playbook ``pb-test-groups.yml``:
+
+.. code-block:: yaml+jinja
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - debug:
+ var: iocage_tags
+
+ - debug:
+ msg: |
+ {% for group in groups %}
+ {{ group }}: {{ groups[group] }}
+ {% endfor %}
+ run_once: true
+
+Run the playbook:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ iocage_tags:
+ project: foo
+ vmm: iocage_02
+ ok: [srv_2] =>
+ iocage_tags:
+ project: foo
+ vmm: iocage_02
+ ok: [srv_3] =>
+ iocage_tags:
+ project: bar
+ vmm: iocage_02
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ msg: |-
+ all: ['srv_1', 'srv_2', 'srv_3']
+ ungrouped: []
+ vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3']
+ project_foo: ['srv_1', 'srv_2']
+ project_bar: ['srv_3']
+
+ PLAY RECAP **********************************************************************************************************
+ srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
diff --git a/docs/docsite/rst/guide_modulehelper.rst b/docs/docsite/rst/guide_modulehelper.rst
new file mode 100644
index 0000000000..711cdc7f99
--- /dev/null
+++ b/docs/docsite/rst/guide_modulehelper.rst
@@ -0,0 +1,559 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper:
+
+Module Helper guide
+===================
+
+
+Introduction
+^^^^^^^^^^^^
+
+Writing a module for Ansible is largely described in existing documentation.
+However, a good part of that is boilerplate code that needs to be repeated every single time.
+That is where ``ModuleHelper`` comes to assistance: a lot of that boilerplate code is done.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.quickstart:
+
+Quickstart
+""""""""""
+
+See the `example from Ansible documentation `_
+written with ``ModuleHelper``.
+But bear in mind that it does not showcase all of MH's features:
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+
+
+ class MyTest(ModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ new=dict(type='bool', required=False, default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ def __run__(self):
+ self.vars.original_message = ''
+ self.vars.message = ''
+ if self.check_mode:
+ return
+ self.vars.original_message = self.vars.name
+ self.vars.message = 'goodbye'
+ self.changed = self.vars['new']
+ if self.vars.name == "fail me":
+ self.do_raise("You requested this to fail")
+
+
+ def main():
+ MyTest.execute()
+
+
+ if __name__ == '__main__':
+ main()
+
+
+Module Helper
+^^^^^^^^^^^^^
+
+Introduction
+""""""""""""
+
+``ModuleHelper`` is a wrapper around the standard ``AnsibleModule``, providing extra features and conveniences.
+The basic structure of a module using ``ModuleHelper`` is as shown in the
+:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart`
+section above, but there are more elements that will take part in it.
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+
+
+ class MyTest(ModuleHelper):
+        # behavior for module parameters ONLY, see below for further information
+ output_params = ()
+ change_params = ()
+ diff_params = ()
+ facts_params = ()
+
+ facts_name = None # used if generating facts, from parameters or otherwise
+
+ module = dict(
+ argument_spec=dict(...),
+ # ...
+ )
+
+After importing the ``ModuleHelper`` class, you need to declare your own class extending it.
+
+.. seealso::
+
+ There is a variation called ``StateModuleHelper``, which builds on top of the features provided by MH.
+ See :ref:`ansible_collections.community.general.docsite.guide_modulehelper.statemh` below for more details.
+
+The easiest way of specifying the module is to create the class variable ``module`` with a dictionary
+containing the exact arguments that would be passed as parameters to ``AnsibleModule``.
+If you prefer to create the ``AnsibleModule`` object yourself, just assign it to the ``module`` class variable.
+MH also accepts a parameter ``module`` in its constructor; if that parameter is used,
+then it will override the class variable. The parameter can either be ``dict`` or ``AnsibleModule`` as well.
+
+Beyond the definition of the module, there are other variables that can be used to control aspects
+of MH's behavior. These variables should be set at the very beginning of the class, and their semantics are
+explained through this document.
+
+The main logic of MH happens in the ``ModuleHelper.run()`` method, which looks like:
+
+.. code-block:: python
+
+ @module_fails_on_exception
+ def run(self):
+ self.__init_module__()
+ self.__run__()
+ self.__quit_module__()
+ output = self.output
+ if 'failed' not in output:
+ output['failed'] = False
+ self.module.exit_json(changed=self.has_changed(), **output)
+
+The method ``ModuleHelper.__run__()`` must be implemented by the module and most
+modules will be able to perform their actions implementing only that MH method.
+However, in some cases, you might want to execute actions before or after the main tasks, in which cases
+you should implement ``ModuleHelper.__init_module__()`` and ``ModuleHelper.__quit_module__()`` respectively.
+
+Note that the output comes from ``self.output``, which is a ``@property`` method.
+By default, that property will collect all the variables that are marked for output and return them in a dictionary with their values.
+Moreover, the default ``self.output`` will also handle Ansible ``facts`` and *diff mode*.
+Also note the changed status comes from ``self.has_changed()``, which is usually calculated from variables that are marked
+to track changes in their content.
+
+.. seealso::
+
+ More details in sections
+ :ref:`ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput` and
+ :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below.
+
+.. seealso::
+
+ See more about the decorator
+ :ref:`ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco` below.
+
+
+Another way to write the example from the
+:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart`
+would be:
+
+.. code-block:: python
+
+ def __init_module__(self):
+ self.vars.original_message = ''
+ self.vars.message = ''
+
+ def __run__(self):
+ if self.check_mode:
+ return
+ self.vars.original_message = self.vars.name
+ self.vars.message = 'goodbye'
+ self.changed = self.vars['new']
+
+ def __quit_module__(self):
+ if self.vars.name == "fail me":
+ self.do_raise("You requested this to fail")
+
+Notice that there are no calls to ``module.exit_json()`` nor ``module.fail_json()``: if the module fails, raise an exception.
+You can use the convenience method ``self.do_raise()`` or raise the exception as usual in Python to do that.
+If no exception is raised, then the module succeeds.
+
+.. seealso::
+
+ See more about exceptions in section
+ :ref:`ansible_collections.community.general.docsite.guide_modulehelper.exceptions` below.
+
+Ansible modules must have a ``main()`` function and the usual test for ``'__main__'``. When using MH that should look like:
+
+.. code-block:: python
+
+ def main():
+ MyTest.execute()
+
+
+ if __name__ == '__main__':
+ main()
+
+The class method ``execute()`` is nothing more than a convenience shortcut for:
+
+.. code-block:: python
+
+ m = MyTest()
+ m.run()
+
+Optionally, an ``AnsibleModule`` may be passed as parameter to ``execute()``.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput:
+
+Parameters, variables, and output
+"""""""""""""""""""""""""""""""""
+
+All the parameters automatically become variables in the ``self.vars`` attribute, which is of the ``VarDict`` type.
+By using ``self.vars``, you get a central mechanism to access the parameters but also to expose variables as return values of the module.
+As described in :ref:`ansible_collections.community.general.docsite.guide_vardict`, variables in ``VarDict`` have metadata associated to them.
+One of the attributes in that metadata marks the variable for output, and MH makes use of that to generate the module's return values.
+
+.. note::
+
+ The ``VarDict`` class was introduced in community.general 7.1.0, as part of ``ModuleHelper`` itself.
+ However, it has been factored out to become an utility on its own, described in :ref:`ansible_collections.community.general.docsite.guide_vardict`,
+ and the older implementation was removed in community.general 11.0.0.
+
+    Some code might still refer to the class variables ``use_old_vardict`` and ``mute_vardict_deprecation``, used for the transition to the new
+ implementation but from community.general 11.0.0 onwards they are no longer used and can be safely removed from the code.
+
+Contrary to new variables created in ``VarDict``, module parameters are not set for output by default.
+If you want to include some module parameters in the output, list them in the ``output_params`` class variable.
+
+.. code-block:: python
+
+ class MyTest(ModuleHelper):
+ output_params = ('state', 'name')
+ ...
+
+.. important::
+
+ The variable names listed in ``output_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
+Another neat feature provided by MH by using ``VarDict`` is the automatic tracking of changes when setting the metadata ``change=True``.
+Again, to enable this feature for module parameters, you must list them in the ``change_params`` class variable.
+
+.. code-block:: python
+
+ class MyTest(ModuleHelper):
+ # example from community.general.xfconf
+ change_params = ('value', )
+ ...
+
+.. important::
+
+ The variable names listed in ``change_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
+.. seealso::
+
+ See more about this in
+ :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below.
+
+Similarly, if you want to use Ansible's diff mode, you can set the metadata ``diff=True`` and ``diff_params`` for module parameters.
+With that, MH will automatically generate the diff output for variables that have changed.
+
+.. code-block:: python
+
+ class MyTest(ModuleHelper):
+ diff_params = ('value', )
+
+ def __run__(self):
+ # example from community.general.gio_mime
+ self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True)
+
+.. important::
+
+ The variable names listed in ``diff_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
+Moreover, if a module is set to return *facts* instead of return values, then again use the metadata ``fact=True`` and ``fact_params`` for module parameters.
+Additionally, you must specify ``facts_name``, as in:
+
+.. code-block:: python
+
+ class VolumeFacts(ModuleHelper):
+ facts_name = 'volume_facts'
+
+ def __init_module__(self):
+ self.vars.set("volume", 123, fact=True)
+
+That generates an Ansible fact like:
+
+.. code-block:: yaml+jinja
+
+ - name: Obtain volume facts
+ some.collection.volume_facts:
+ # parameters
+
+ - name: Print volume facts
+ debug:
+ msg: Volume fact is {{ ansible_facts.volume_facts.volume }}
+
+.. important::
+
+ The variable names listed in ``fact_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
+.. important::
+
+ If ``facts_name`` is not set, the module does not generate any facts.
+
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.changes:
+
+Handling changes
+""""""""""""""""
+
+In MH there are many ways to indicate change in the module execution. Here they are:
+
+Tracking changes in variables
+-----------------------------
+
+As explained above, you can enable change tracking in any number of variables in ``self.vars``.
+By the end of the module execution, if any of those variables has a value different from the first value assigned to them,
+then that will be picked up by MH and signalled as changed at the module output.
+See the example below to learn how you can enable change tracking in variables:
+
+.. code-block:: python
+
+ # using __init_module__() as example, it works the same in __run__() and __quit_module__()
+ def __init_module__(self):
+ # example from community.general.ansible_galaxy_install
+ self.vars.set("new_roles", {}, change=True)
+
+ # example of "hidden" variable used only to track change in a value from community.general.gconftool2
+ self.vars.set('_value', self.vars.previous_value, output=False, change=True)
+
+ # enable change-tracking without assigning value
+ self.vars.set_meta("new_roles", change=True)
+
+ # if you must forcibly set an initial value to the variable
+ self.vars.set_meta("new_roles", initial_value=[])
+ ...
+
+If the end value of any variable marked ``change`` is different from its initial value, then MH will return ``changed=True``.
+
+Indicating changes with ``changed``
+-----------------------------------
+
+If you want to indicate change directly in the code, then use the ``self.changed`` property in MH.
+Beware that this is a ``@property`` method in MH, with both a *getter* and a *setter*.
+By default, that hidden field is set to ``False``.
+
+Effective change
+----------------
+
+The effective outcome for the module is determined in the ``self.has_changed()`` method, and it consists of the logical *OR* operation
+between ``self.changed`` and the change calculated from ``self.vars``.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.exceptions:
+
+Exceptions
+""""""""""
+
+In MH, instead of calling ``module.fail_json()`` you can just raise an exception.
+The output variables are collected the same way they would be for a successful execution.
+However, you can set output variables specifically for that exception, if you so choose.
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelperException
+
+ def __init_module__(self):
+ if not complex_validation():
+ self.do_raise("Validation failed!")
+
+ # Or passing output variables
+ awesomeness = calculate_awesomeness()
+ if awesomeness > 1000:
+ self.do_raise("Over awesome, I cannot handle it!", update_output={"awesomeness": awesomeness})
+ # which is just a convenience shortcut for
+ raise ModuleHelperException("...", update_output={...})
+
+All exceptions derived from ``Exception`` are captured and translated into a ``fail_json()`` call.
+However, if you do want to call ``self.module.fail_json()`` yourself it will work,
+just keep in mind that there will be no automatic handling of output variables in that case.
+
+Behind the curtains, all ``do_raise()`` does is to raise a ``ModuleHelperException``.
+If you want to create specialized error handling for your code, the best way is to extend that class and raise it when needed.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.statemh:
+
+StateModuleHelper
+^^^^^^^^^^^^^^^^^
+
+Many modules use a parameter ``state`` that effectively controls the exact action performed by the module, such as
+``state=present`` or ``state=absent`` for installing or removing packages.
+By using ``StateModuleHelper`` you can make your code like the excerpt from the ``gconftool2`` below:
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+
+ class GConftool(StateModuleHelper):
+ ...
+ module = dict(
+ ...
+ )
+
+ def __init_module__(self):
+ self.runner = gconftool2_runner(self.module, check_rc=True)
+ ...
+
+ self.vars.set('previous_value', self._get(), fact=True)
+ self.vars.set('value_type', self.vars.value_type)
+ self.vars.set('_value', self.vars.previous_value, output=False, change=True)
+ self.vars.set_meta('value', initial_value=self.vars.previous_value)
+ self.vars.set('playbook_value', self.vars.value, fact=True)
+
+ ...
+
+ def state_absent(self):
+ with self.runner("state key", output_process=self._make_process(False)) as ctx:
+ ctx.run()
+ self.vars.set('run_info', ctx.run_info, verbosity=4)
+ self.vars.set('new_value', None, fact=True)
+ self.vars._value = None
+
+ def state_present(self):
+ with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx:
+ ctx.run()
+ self.vars.set('run_info', ctx.run_info, verbosity=4)
+ self.vars.set('new_value', self._get(), fact=True)
+ self.vars._value = self.vars.new_value
+
+Note that the method ``__run__()`` is implemented in ``StateModuleHelper``, all you need to implement are the methods ``state_<state_value>()``.
+In the example above, :ansplugin:`community.general.gconftool2#module` only has two states, ``present`` and ``absent``, thus, ``state_present()`` and ``state_absent()``.
+
+If the controlling parameter is not called ``state``, like in :ansplugin:`community.general.jira#module` module, just let SMH know about it:
+
+.. code-block:: python
+
+ class JIRA(StateModuleHelper):
+ state_param = 'operation'
+
+ def operation_create(self):
+ ...
+
+ def operation_search(self):
+ ...
+
+Lastly, if the module is called with ``state=somevalue`` and the method ``state_somevalue``
+is not implemented, SMH will resort to call a method called ``__state_fallback__()``.
+By default, this method will raise a ``ValueError`` indicating the method was not found.
+Naturally, you can override that method to write a default implementation, as in :ansplugin:`community.general.locale_gen#module`:
+
+.. code-block:: python
+
+ def __state_fallback__(self):
+ if self.vars.state_tracking == self.vars.state:
+ return
+ if self.vars.ubuntu_mode:
+ self.apply_change_ubuntu(self.vars.state, self.vars.name)
+ else:
+ self.apply_change(self.vars.state, self.vars.name)
+
+That module has only the states ``present`` and ``absent`` and the code for both is the one in the fallback method.
+
+.. note::
+
+ The name of the fallback method **does not change** if you set a different value of ``state_param``.
+
+
+Other Conveniences
+^^^^^^^^^^^^^^^^^^
+
+Delegations to AnsibleModule
+""""""""""""""""""""""""""""
+
+The MH properties and methods below are delegated as-is to the underlying ``AnsibleModule`` instance in ``self.module``:
+
+- ``check_mode``
+- ``get_bin_path()``
+- ``warn()``
+- ``deprecate()``
+
+Additionally, MH will also delegate:
+
+- ``diff_mode`` to ``self.module._diff``
+- ``verbosity`` to ``self.module._verbosity``
+
+Starting in community.general 10.3.0, MH will also delegate the method ``debug`` to ``self.module``.
+If any existing module already has a ``debug`` attribute defined, a warning message will be generated,
+requesting it to be renamed. Upon the release of community.general 12.0.0, the delegation will be
+preemptive and will override any existing method or property in the subclasses.
+
+Decorators
+""""""""""
+
+The following decorators should only be used within ``ModuleHelper`` class.
+
+@cause_changes
+--------------
+
+This decorator will control whether the outcome of the method will cause the module to signal change in its output.
+If the method completes without raising an exception it is considered to have succeeded, otherwise, it will have failed.
+
+The decorator has a parameter ``when`` that accepts three different values: ``success``, ``failure``, and ``always``.
+There are also two legacy parameters, ``on_success`` and ``on_failure``, that will be deprecated, so do not use them.
+The value of ``changed`` in the module output will be set to ``True``:
+
+- ``when="success"`` and the method completes without raising an exception.
+- ``when="failure"`` and the method raises an exception.
+- ``when="always"``, regardless of the method raising an exception or not.
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.module_helper import cause_changes
+
+ # adapted excerpt from the community.general.jira module
+ class JIRA(StateModuleHelper):
+ @cause_changes(when="success")
+ def operation_create(self):
+ ...
+
+If ``when`` has a different value or no parameters are specified, the decorator will have no effect whatsoever.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco:
+
+@module_fails_on_exception
+--------------------------
+
+In a method using this decorator, if an exception is raised, the text message of that exception will be captured
+by the decorator and used to call ``self.module.fail_json()``.
+In most of the cases there will be no need to use this decorator, because ``ModuleHelper.run()`` already uses it.
+
+@check_mode_skip
+----------------
+
+If the module is running in check mode, this decorator will prevent the method from executing.
+The return value in that case is ``None``.
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.module_helper import check_mode_skip
+
+ # adapted excerpt from the community.general.locale_gen module
+ class LocaleGen(StateModuleHelper):
+ @check_mode_skip
+ def __state_fallback__(self):
+ ...
+
+
+@check_mode_skip_returns
+------------------------
+
+This decorator is similar to the previous one, but the developer can control the return value for the method when running in check mode.
+It is used with one of two parameters. One is ``callable`` and the return value in check mode will be ``callable(self, *args, **kwargs)``,
+where ``self`` is the ``ModuleHelper`` instance and the union of ``args`` and ``kwargs`` will contain all the parameters passed to the method.
+
+The other option is to use the parameter ``value``, in which case the method will return ``value`` when in check mode.
+
+
+References
+^^^^^^^^^^
+
+- `Ansible Developer Guide `_
+- `Creating a module `_
+- `Returning ansible facts `_
+- :ref:`ansible_collections.community.general.docsite.guide_vardict`
+
+
+.. versionadded:: 3.1.0
diff --git a/docs/docsite/rst/guide_online.rst b/docs/docsite/rst/guide_online.rst
new file mode 100644
index 0000000000..c233b403e8
--- /dev/null
+++ b/docs/docsite/rst/guide_online.rst
@@ -0,0 +1,49 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_online:
+
+****************
+Online.net Guide
+****************
+
+Introduction
+============
+
+Online is a French hosting company mainly known for providing bare-metal servers named Dedibox.
+Check it out: `https://www.online.net/en `_
+
+Dynamic inventory for Online resources
+--------------------------------------
+
+Ansible has a dynamic inventory plugin that can list your resources.
+
+1. Create a YAML configuration such as ``online_inventory.yml`` with this content:
+
+ .. code-block:: yaml
+
+ plugin: community.general.online
+
+2. Set your ``ONLINE_TOKEN`` environment variable with your token.
+
+ You need to open an account and log into it before you can get a token.
+ You can find your token at the following page: `https://console.online.net/en/api/access `_
+
+3. You can test that your inventory is working by running:
+
+ .. code-block:: console
+
+ $ ansible-inventory -v -i online_inventory.yml --list
+
+
+4. Now you can run your playbook or any other module with this inventory:
+
+ .. code-block:: ansible-output
+
+ $ ansible all -i online_inventory.yml -m ping
+ sd-96735 | SUCCESS => {
+ "changed": false,
+ "ping": "pong"
+ }
diff --git a/docs/docsite/rst/guide_packet.rst b/docs/docsite/rst/guide_packet.rst
new file mode 100644
index 0000000000..95b38dddd0
--- /dev/null
+++ b/docs/docsite/rst/guide_packet.rst
@@ -0,0 +1,214 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_packet:
+
+**********************************
+Packet.net Guide
+**********************************
+
+Introduction
+============
+
+`Packet.net `_ is a bare metal infrastructure host that is supported by the community.general collection through six cloud modules. The six modules are:
+
+- :ansplugin:`community.general.packet_device#module`: manages servers on Packet. You can use this module to create, restart and delete devices.
+- :ansplugin:`community.general.packet_ip_subnet#module`: assign IP subnet to a bare metal server
+- :ansplugin:`community.general.packet_project#module`: create/delete a project in Packet host
+- :ansplugin:`community.general.packet_sshkey#module`: adds a public SSH key from file or value to the Packet infrastructure. Every subsequently-created device will have this public key installed in .ssh/authorized_keys.
+- :ansplugin:`community.general.packet_volume#module`: create/delete a volume in Packet host
+- :ansplugin:`community.general.packet_volume_attachment#module`: attach/detach a volume to a device in the Packet host
+
+Note, this guide assumes you are familiar with Ansible and how it works. If you are not, have a look at their :ref:`docs ` before getting started.
+
+Requirements
+============
+
+The Packet modules connect to the Packet API using the `packet-python package `_. You can install it with pip:
+
+.. code-block:: console
+
+ $ pip install packet-python
+
+In order to check the state of devices created by Ansible on Packet, it is a good idea to install one of the `Packet CLI clients `_. Otherwise you can check them through the `Packet portal `_.
+
+To use the modules you will need a Packet API token. You can generate an API token through the Packet portal `here `__. The simplest way to authenticate yourself is to set the Packet API token in an environment variable:
+
+.. code-block:: console
+
+ $ export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs
+
+If you are not comfortable exporting your API token, you can pass it as a parameter to the modules.
+
+On Packet, devices and reserved IP addresses belong to `projects `_. In order to use the packet_device module, you need to specify the UUID of the project in which you want to create or manage devices. You can find a project's UUID in the Packet portal `here `_ (it is just under the project table) or through one of the available `CLIs `_.
+
+
+If you want to use a new SSH key pair in this tutorial, you can generate it to ``./id_rsa`` and ``./id_rsa.pub`` as:
+
+.. code-block:: console
+
+ $ ssh-keygen -t rsa -f ./id_rsa
+
+If you want to use an existing key pair, just copy the private and public key over to the playbook directory.
+
+
+Device Creation
+===============
+
+The following code block is a simple playbook that creates one `Type 0 `_ server (the ``plan`` parameter). You have to supply ``plan`` and ``operating_system``. ``location`` defaults to ``ewr1`` (Parsippany, NJ). You can find all the possible values for the parameters through a `CLI client `_.
+
+.. code-block:: yaml+jinja
+
+ # playbook_create.yml
+
+ - name: Create Ubuntu device
+ hosts: localhost
+ tasks:
+
+ - community.general.packet_sshkey:
+ key_file: ./id_rsa.pub
+ label: tutorial key
+
+ - community.general.packet_device:
+ project_id:
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+After running ``ansible-playbook playbook_create.yml``, you should have a server provisioned on Packet. You can verify through a CLI or in the `Packet portal `__.
+
+If you get an error with the message "failed to set machine state present, error: Error 404: Not Found", please verify your project UUID.
+
+
+Updating Devices
+================
+
+The two parameters used to uniquely identify Packet devices are: "device_ids" and "hostnames". Both parameters accept either a single string (later converted to a one-element list), or a list of strings.
+
+The ``device_ids`` and ``hostnames`` parameters are mutually exclusive. The following values are all acceptable:
+
+- device_ids: ``a27b7a83-fc93-435b-a128-47a5b04f2dcf``
+
+- hostnames: ``mydev1``
+
+- device_ids: ``[a27b7a83-fc93-435b-a128-47a5b04f2dcf, 4887130f-0ccd-49a0-99b0-323c1ceb527b]``
+
+- hostnames: ``[mydev1, mydev2]``
+
+In addition, hostnames can contain a special ``%d`` formatter along with a ``count`` parameter that lets you easily expand hostnames that follow a simple name and number pattern; in other words, ``hostnames: "mydev%d", count: 2`` will expand to [mydev1, mydev2].
+
+If your playbook acts on existing Packet devices, you can only pass the ``hostname`` and ``device_ids`` parameters. The following playbook shows how you can reboot a specific Packet device by setting the ``hostname`` parameter:
+
+.. code-block:: yaml+jinja
+
+ # playbook_reboot.yml
+
+ - name: reboot myserver
+ hosts: localhost
+ tasks:
+
+ - community.general.packet_device:
+ project_id:
+ hostnames: myserver
+ state: rebooted
+
+You can also identify specific Packet devices with the ``device_ids`` parameter. The device's UUID can be found in the `Packet Portal `_ or by using a `CLI `_. The following playbook removes a Packet device using the ``device_ids`` field:
+
+.. code-block:: yaml+jinja
+
+ # playbook_remove.yml
+
+ - name: remove a device
+ hosts: localhost
+ tasks:
+
+ - community.general.packet_device:
+ project_id:
+ device_ids:
+ state: absent
+
+
+More Complex Playbooks
+======================
+
+In this example, we will create a CoreOS cluster with `user data `_.
+
+
+The CoreOS cluster will use `etcd `_ for discovery of other servers in the cluster. Before provisioning your servers, you will need to generate a discovery token for your cluster:
+
+.. code-block:: console
+
+ $ curl -w "\n" 'https://discovery.etcd.io/new?size=3'
+
+The following playbook will create an SSH key, 3 Packet servers, and then wait until SSH is ready (or until 5 minutes passed). Make sure to substitute the discovery token URL in ``user_data``, and the ``project_id`` before running ``ansible-playbook``. Also, feel free to change ``plan`` and ``facility``.
+
+.. code-block:: yaml+jinja
+
+ # playbook_coreos.yml
+
+ - name: Start 3 CoreOS nodes in Packet and wait until SSH is ready
+ hosts: localhost
+ tasks:
+
+ - community.general.packet_sshkey:
+ key_file: ./id_rsa.pub
+ label: new
+
+ - community.general.packet_device:
+ hostnames: [coreos-one, coreos-two, coreos-three]
+ operating_system: coreos_beta
+ plan: baremetal_0
+ facility: ewr1
+ project_id:
+ wait_for_public_IPv: 4
+ user_data: |
+ # cloud-config
+ coreos:
+ etcd2:
+ discovery: https://discovery.etcd.io/
+ advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001
+ initial-advertise-peer-urls: http://$private_ipv4:2380
+ listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
+ listen-peer-urls: http://$private_ipv4:2380
+ fleet:
+ public-ip: $private_ipv4
+ units:
+ - name: etcd2.service
+ command: start
+ - name: fleet.service
+ command: start
+ register: newhosts
+
+ - name: wait for ssh
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ipv4 }}"
+ port: 22
+ state: started
+ timeout: 500
+ loop: "{{ newhosts.results[0].devices }}"
+
+
+As with most Ansible modules, the default states of the Packet modules are idempotent, meaning the resources in your project will remain the same after re-runs of a playbook. Thus, we can keep the ``packet_sshkey`` module call in our playbook. If the public key is already in your Packet account, the call will have no effect.
+
+The second module call provisions 3 Packet Type 0 (specified using the ``plan`` parameter) servers in the project identified by the ``project_id`` parameter. The servers are all provisioned with CoreOS beta (the ``operating_system`` parameter) and are customized with cloud-config user data passed to the ``user_data`` parameter.
+
+The ``packet_device`` module has a ``wait_for_public_IPv`` that is used to specify the version of the IP address to wait for (valid values are ``4`` or ``6`` for IPv4 or IPv6). If specified, Ansible will wait until the GET API call for a device contains an Internet-routeable IP address of the specified version. When referring to an IP address of a created device in subsequent module calls, it is wise to use the ``wait_for_public_IPv`` parameter, or ``state: active`` in the packet_device module call.
+
+Run the playbook:
+
+.. code-block:: console
+
+ $ ansible-playbook playbook_coreos.yml
+
+Once the playbook quits, your new devices should be reachable through SSH. Try to connect to one and check if etcd has started properly:
+
+.. code-block:: console
+
+ tomk@work $ ssh -i id_rsa core@$one_of_the_servers_ip
+ core@coreos-one ~ $ etcdctl cluster-health
+
+If you have any questions or comments let us know! help@packet.net
diff --git a/docs/docsite/rst/guide_scaleway.rst b/docs/docsite/rst/guide_scaleway.rst
new file mode 100644
index 0000000000..f3b7b24e0e
--- /dev/null
+++ b/docs/docsite/rst/guide_scaleway.rst
@@ -0,0 +1,320 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_scaleway:
+
+**************
+Scaleway Guide
+**************
+
+Introduction
+============
+
+`Scaleway <https://www.scaleway.com>`_ is a cloud provider supported by the community.general collection through a set of plugins and modules.
+Those modules are:
+
+- :ansplugin:`community.general.scaleway_compute#module`: manages servers on Scaleway. You can use this module to create, restart and delete servers.
+- :ansplugin:`community.general.scaleway_compute_private_network#module`
+- :ansplugin:`community.general.scaleway_container#module`
+- :ansplugin:`community.general.scaleway_container_info#module`
+- :ansplugin:`community.general.scaleway_container_namespace_info#module`
+- :ansplugin:`community.general.scaleway_container_namespace#module`
+- :ansplugin:`community.general.scaleway_container_registry_info#module`
+- :ansplugin:`community.general.scaleway_container_registry#module`
+- :ansplugin:`community.general.scaleway_database_backup#module`
+- :ansplugin:`community.general.scaleway_function#module`
+- :ansplugin:`community.general.scaleway_function_info#module`
+- :ansplugin:`community.general.scaleway_function_namespace_info#module`
+- :ansplugin:`community.general.scaleway_function_namespace#module`
+- :ansplugin:`community.general.scaleway_image_info#module`
+- :ansplugin:`community.general.scaleway_ip#module`
+- :ansplugin:`community.general.scaleway_ip_info#module`
+- :ansplugin:`community.general.scaleway_lb#module`
+- :ansplugin:`community.general.scaleway_organization_info#module`
+- :ansplugin:`community.general.scaleway_private_network#module`
+- :ansplugin:`community.general.scaleway_security_group#module`
+- :ansplugin:`community.general.scaleway_security_group_info#module`
+- :ansplugin:`community.general.scaleway_security_group_rule#module`
+- :ansplugin:`community.general.scaleway_server_info#module`
+- :ansplugin:`community.general.scaleway_snapshot_info#module`
+- :ansplugin:`community.general.scaleway_sshkey#module`: adds a public SSH key from a file or value to the Scaleway infrastructure. Every subsequently-created device will have this public key installed in ``.ssh/authorized_keys``.
+- :ansplugin:`community.general.scaleway_user_data#module`
+- :ansplugin:`community.general.scaleway_volume#module`: manages volumes on Scaleway.
+- :ansplugin:`community.general.scaleway_volume_info#module`
+
+The plugins are:
+
+- :ansplugin:`community.general.scaleway#inventory`: inventory plugin
+
+
+.. note::
+ This guide assumes you are familiar with Ansible and how it works.
+ If you are not, have a look at :ref:`ansible_documentation` before getting started.
+
+Requirements
+============
+
+The Scaleway modules and inventory script connect to the Scaleway API using the `Scaleway REST API <https://developer.scaleway.com>`_.
+To use the modules and inventory script you will need a Scaleway API token.
+You can generate an API token through the `Scaleway console's credential page <https://console.scaleway.com/project/credentials>`__.
+The simplest way to authenticate yourself is to set the Scaleway API token in an environment variable:
+
+.. code-block:: console
+
+ $ export SCW_TOKEN=00000000-1111-2222-3333-444444444444
+
+If you are not comfortable exporting your API token, you can pass it as a parameter to the modules using the ``api_token`` argument.
+
+If you want to use a new SSH key pair in this tutorial, you can generate it to ``./id_rsa`` and ``./id_rsa.pub`` as:
+
+.. code-block:: console
+
+ $ ssh-keygen -t rsa -f ./id_rsa
+
+If you want to use an existing key pair, just copy the private and public key over to the playbook directory.
+
+How to add an SSH key?
+======================
+
+Connections to Scaleway Compute nodes use Secure Shell.
+SSH keys are stored at the account level, which means that you can reuse the same SSH key in multiple nodes.
+The first step to configure Scaleway compute resources is to have at least one SSH key configured.
+
+:ansplugin:`community.general.scaleway_sshkey#module` is a module that manages SSH keys on your Scaleway account.
+You can add an SSH key to your account by including the following task in a playbook:
+
+.. code-block:: yaml+jinja
+
+ - name: "Add SSH key"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+
+The ``ssh_pub_key`` parameter contains your ssh public key as a string. Here is an example inside a playbook:
+
+
+.. code-block:: yaml+jinja
+
+ - name: Test SSH key lifecycle on a Scaleway account
+ hosts: localhost
+ gather_facts: false
+ environment:
+ SCW_API_KEY: ""
+
+ tasks:
+
+ - community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB...424242 developer@example.com"
+ state: present
+ register: result
+
+ - ansible.builtin.assert:
+ that:
+ - result is success and result is changed
+
+How to create a compute instance?
+=================================
+
+Now that we have an SSH key configured, the next step is to spin up a server!
+:ansplugin:`community.general.scaleway_compute#module` is a module that can create, update and delete Scaleway compute instances:
+
+.. code-block:: yaml+jinja
+
+ - name: Create a server
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 00000000-1111-2222-3333-444444444444
+ organization: 00000000-1111-2222-3333-444444444444
+ region: ams1
+ commercial_type: START1-S
+
+Here are the parameter details for the example shown above:
+
+- ``name`` is the name of the instance (the one that will show up in your web console).
+- ``image`` is the UUID of the system image you would like to use.
+ A list of all images is available for each availability zone.
+- ``organization`` represents the organization that your account is attached to.
+- ``region`` represents the Availability Zone which your instance is in (for this example, ``par1`` and ``ams1``).
+- ``commercial_type`` represents the name of the commercial offers.
+ You can check out the Scaleway pricing page to find which instance is right for you.
+
+Take a look at this short playbook to see a working example using ``scaleway_compute``:
+
+.. code-block:: yaml+jinja
+
+ - name: Test compute instance lifecycle on a Scaleway account
+ hosts: localhost
+ gather_facts: false
+ environment:
+ SCW_API_KEY: ""
+
+ tasks:
+
+ - name: Create a server
+ register: server_creation_task
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 00000000-1111-2222-3333-444444444444
+ organization: 00000000-1111-2222-3333-444444444444
+ region: ams1
+ commercial_type: START1-S
+ wait: true
+
+ - ansible.builtin.debug:
+ var: server_creation_task
+
+ - ansible.builtin.assert:
+ that:
+ - server_creation_task is success
+ - server_creation_task is changed
+
+ - name: Run it
+ community.general.scaleway_compute:
+ name: foobar
+ state: running
+ image: 00000000-1111-2222-3333-444444444444
+ organization: 00000000-1111-2222-3333-444444444444
+ region: ams1
+ commercial_type: START1-S
+ wait: true
+ tags:
+ - web_server
+ register: server_run_task
+
+ - ansible.builtin.debug:
+ var: server_run_task
+
+ - ansible.builtin.assert:
+ that:
+ - server_run_task is success
+ - server_run_task is changed
+
+Dynamic Inventory Plugin
+========================
+
+Ansible ships with :ansplugin:`community.general.scaleway#inventory`.
+You can now get a complete inventory of your Scaleway resources through this plugin and filter it on
+different parameters (``regions`` and ``tags`` are currently supported).
+
+Let us create an example!
+Suppose that we want to get all hosts that have the tag ``web_server``.
+Create a file named ``scaleway_inventory.yml`` with the following content:
+
+.. code-block:: yaml+jinja
+
+ plugin: community.general.scaleway
+ regions:
+ - ams1
+ - par1
+ tags:
+ - web_server
+
+This inventory means that we want all hosts that got the tag ``web_server`` on the zones ``ams1`` and ``par1``.
+Once you have configured this file, you can get the information using the following command:
+
+.. code-block:: console
+
+ $ ansible-inventory --list -i scaleway_inventory.yml
+
+The output will be:
+
+.. code-block:: json
+
+ {
+ "_meta": {
+ "hostvars": {
+ "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d": {
+ "ansible_verbosity": 6,
+ "arch": "x86_64",
+ "commercial_type": "START1-S",
+ "hostname": "foobar",
+ "ipv4": "192.0.2.1",
+ "organization": "00000000-1111-2222-3333-444444444444",
+ "state": "running",
+ "tags": [
+ "web_server"
+ ]
+ }
+ }
+ },
+ "all": {
+ "children": [
+ "ams1",
+ "par1",
+ "ungrouped",
+ "web_server"
+ ]
+ },
+ "ams1": {},
+ "par1": {
+ "hosts": [
+ "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d"
+ ]
+ },
+ "ungrouped": {},
+ "web_server": {
+ "hosts": [
+ "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d"
+ ]
+ }
+ }
+
+As you can see, we get different groups of hosts.
+``par1`` and ``ams1`` are groups based on location.
+``web_server`` is a group based on a tag.
+
+In case a filter parameter is not defined, the plugin assumes all possible values are wanted.
+This means that for each tag that exists on your Scaleway compute nodes, a group based on each tag will be created.
+
+Scaleway S3 object storage
+==========================
+
+`Object Storage <https://www.scaleway.com/en/object-storage/>`_ allows you to store any kind of objects (documents, images, videos, and so on).
+As the Scaleway API is S3 compatible, Ansible supports it natively through the amazon.aws modules: :ansplugin:`amazon.aws.s3_bucket#module`, :ansplugin:`amazon.aws.s3_object#module`.
+
+You can find many examples in the `scaleway_s3 integration tests <https://github.com/ansible-collections/community.general/tree/main/tests/integration/targets/scaleway_s3>`_.
+
+.. code-block:: yaml+jinja
+
+ - hosts: myserver
+ vars:
+ scaleway_region: nl-ams
+ s3_url: https://s3.nl-ams.scw.cloud
+ environment:
+ # AWS_ACCESS_KEY matches your scaleway organization id available at https://cloud.scaleway.com/#/account
+ AWS_ACCESS_KEY: 00000000-1111-2222-3333-444444444444
+ # AWS_SECRET_KEY matches a secret token that you can retrieve at https://cloud.scaleway.com/#/credentials
+ AWS_SECRET_KEY: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee
+ module_defaults:
+ group/amazon.aws.aws:
+ s3_url: '{{ s3_url }}'
+ region: '{{ scaleway_region }}'
+ tasks:
+ # use a fact instead of a variable, otherwise the template is evaluated each time the variable is used
+ - ansible.builtin.set_fact:
+ bucket_name: "{{ 99999999 | random | to_uuid }}"
+
+ # "requester_pays:" is mandatory because Scaleway does not implement related API
+ # another way is to use amazon.aws.s3_object and "mode: create" !
+ - amazon.aws.s3_bucket:
+ name: '{{ bucket_name }}'
+ requester_pays:
+
+ - name: Another way to create the bucket
+ amazon.aws.s3_object:
+ bucket: '{{ bucket_name }}'
+ mode: create
+ encrypt: false
+ register: bucket_creation_check
+
+ - name: add something in the bucket
+ amazon.aws.s3_object:
+ mode: put
+ bucket: '{{ bucket_name }}'
+ src: /tmp/test.txt # needs to be created before
+ object: test.txt
+ encrypt: false # server side encryption must be disabled
diff --git a/docs/docsite/rst/guide_uthelper.rst b/docs/docsite/rst/guide_uthelper.rst
new file mode 100644
index 0000000000..c4a4110d70
--- /dev/null
+++ b/docs/docsite/rst/guide_uthelper.rst
@@ -0,0 +1,394 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_uthelper:
+
+UTHelper Guide
+==============
+
+Introduction
+^^^^^^^^^^^^
+
+``UTHelper`` was written to reduce the boilerplate code used in unit tests for modules.
+It was originally written to handle tests of modules that run external commands using ``AnsibleModule.run_command()``.
+At the time of writing (Feb 2025) that remains the only type of tests you can use
+``UTHelper`` for, but it aims to provide support for other types of interactions.
+
+Until now, there are many different ways to implement unit tests that validate a module based on the execution of external commands. See some examples:
+
+* `test_apk.py <https://github.com/ansible-collections/community.general/blob/main/tests/unit/plugins/modules/test_apk.py>`_ - A very simple one
+* `test_bootc_manage.py <https://github.com/ansible-collections/community.general/blob/main/tests/unit/plugins/modules/test_bootc_manage.py>`_ -
+ This one has more test cases, but do notice how the code is repeated amongst them.
+* `test_modprobe.py <https://github.com/ansible-collections/community.general/blob/main/tests/unit/plugins/modules/test_modprobe.py>`_ -
+ This one has 15 tests in it, but to achieve that it declares 8 classes repeating quite a lot of code.
+
+As you can notice, there is no consistency in the way these tests are executed -
+they all do the same thing eventually, but each one is written in a very distinct way.
+
+``UTHelper`` aims to:
+
+* provide a consistent idiom to define unit tests
+* reduce the code to a bare minimum, and
+* define tests as data instead
+* allow the test cases definition to be expressed not only as a Python data structure but also as YAML content
+
+Quickstart
+""""""""""
+
+To use UTHelper, your test module will need only a bare minimum of code:
+
+.. code-block:: python
+
+ # tests/unit/plugin/modules/test_ansible_module.py
+ from ansible_collections.community.general.plugins.modules import ansible_module
+ from .uthelper import UTHelper, RunCommandMock
+
+
+ UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock])
+
+Then, in the test specification file, you have:
+
+.. code-block:: yaml
+
+ # tests/unit/plugin/modules/test_ansible_module.yaml
+ test_cases:
+ - id: test_ansible_module
+ flags:
+ diff: true
+ input:
+ state: present
+ name: Roger the Shrubber
+ output:
+ shrubbery:
+ looks: nice
+ price: not too expensive
+ changed: true
+ diff:
+ before:
+ shrubbery: null
+ after:
+ shrubbery:
+ looks: nice
+ price: not too expensive
+ mocks:
+ run_command:
+ - command: [/testbin/shrubber, --version]
+ rc: 0
+ out: "2.80.0\n"
+ err: ''
+ - command: [/testbin/shrubber, --make-shrubbery]
+ rc: 0
+ out: 'Shrubbery created'
+ err: ''
+
+.. note::
+
+ If you prefer to pick a different YAML file for the test cases, or if you prefer to define them in plain Python,
+ you can use the convenience methods ``UTHelper.from_file()`` and ``UTHelper.from_spec()``, respectively.
+ See more details below.
+
+
+Using ``UTHelper``
+^^^^^^^^^^^^^^^^^^
+
+Test Module
+"""""""""""
+
+``UTHelper`` is **strictly for unit tests**. To use it, you import the ``.uthelper.UTHelper`` class.
+As mentioned in different parts of this guide, there are three different mechanisms to load the test cases.
+
+.. seealso::
+
+ See the UTHelper class reference below for API details on the three different mechanisms.
+
+
+The easiest and most recommended way of using ``UTHelper`` is literally the example shown.
+See a real world example at
+`test_gconftool2.py <https://github.com/ansible-collections/community.general/blob/main/tests/unit/plugins/modules/test_gconftool2.py>`_.
+
+The ``from_module()`` method will pick the filename of the test module up (in the example above, ``tests/unit/plugins/modules/test_gconftool2.py``)
+and it will search for ``tests/unit/plugins/modules/test_gconftool2.yaml`` (or ``.yml`` if that is not found).
+In that file it will expect to find the test specification expressed in YAML format, conforming to the structure described below.
+
+If you prefer to read the test specifications from a different file path, use ``from_file()`` passing the file handle for the YAML file.
+
+And, if for any reason you prefer or need to pass the data structure rather than dealing with YAML files, use the ``from_spec()`` method.
+A real world example for that can be found at
+`test_snap.py <https://github.com/ansible-collections/community.general/blob/main/tests/unit/plugins/modules/test_snap.py>`_.
+
+
+Test Specification
+""""""""""""""""""
+
+The structure of the test specification data is described below.
+
+Top level
+---------
+
+At the top level there are two accepted keys:
+
+- ``anchors: dict``
+ Optional. Placeholder for you to define YAML anchors that can be repeated in the test cases.
+ Its contents are never accessed directly by ``UTHelper``.
+- ``test_cases: list``
+ Mandatory. List of test cases, see below for definition.
+
+Test cases
+----------
+
+You write the test cases with five elements:
+
+- ``id: str``
+ Mandatory. Used to identify the test case.
+
+- ``flags: dict``
+ Optional. Flags controlling the behavior of the test case. All flags are optional. Accepted flags:
+
+ * ``check: bool``: set to ``true`` if the module is to be executed in **check mode**.
+ * ``diff: bool``: set to ``true`` if the module is to be executed in **diff mode**.
+ * ``skip: str``: set the test case to be skipped, providing the message for ``pytest.skip()``.
+ * ``xfail: str``: set the test case to expect failure, providing the message for ``pytest.xfail()``.
+
+- ``input: dict``
+ Optional. Parameters for the Ansible module, it can be empty.
+
+- ``output: dict``
+ Optional. Expected return values from the Ansible module.
+ All RV names used here are expected to be found in the module output, but not all RVs in the output must be here.
+ It can include special RVs such as ``changed`` and ``diff``.
+ It can be empty.
+
+- ``mocks: dict``
+ Optional. Mocked interactions, ``run_command`` being the only one supported for now.
+ Each key in this dictionary refers to one subclass of ``TestCaseMock`` and its
+ structure is dictated by the ``TestCaseMock`` subclass implementation.
+ All keys are expected to be named using snake case, as in ``run_command``.
+ The ``TestCaseMock`` subclass is responsible for defining the name used in the test specification.
+ The structure for that specification is dependent on the implementing class.
+ See more details below for the implementation of ``RunCommandMock``.
+
+Example using YAML
+------------------
+
+We recommend you use ``UTHelper`` reading the test specifications from a YAML file.
+See an example below of what one actually looks like (excerpt from ``test_opkg.yaml``):
+
+.. code-block:: yaml
+
+ ---
+ anchors:
+ environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false}
+ test_cases:
+ - id: install_zlibdev
+ input:
+ name: zlib-dev
+ state: present
+ output:
+ msg: installed 1 package(s)
+ mocks:
+ run_command:
+ - command: [/testbin/opkg, --version]
+ environ: *env-def
+ rc: 0
+ out: ''
+ err: ''
+ - command: [/testbin/opkg, list-installed, zlib-dev]
+ environ: *env-def
+ rc: 0
+ out: ''
+ err: ''
+ - command: [/testbin/opkg, install, zlib-dev]
+ environ: *env-def
+ rc: 0
+ out: |
+ Installing zlib-dev (1.2.11-6) to root...
+ Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk
+ Installing zlib (1.2.11-6) to root...
+ Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk
+ Configuring zlib.
+ Configuring zlib-dev.
+ err: ''
+ - command: [/testbin/opkg, list-installed, zlib-dev]
+ environ: *env-def
+ rc: 0
+ out: |
+ zlib-dev - 1.2.11-6
+ err: ''
+ - id: install_zlibdev_present
+ input:
+ name: zlib-dev
+ state: present
+ output:
+ msg: package(s) already present
+ mocks:
+ run_command:
+ - command: [/testbin/opkg, --version]
+ environ: *env-def
+ rc: 0
+ out: ''
+ err: ''
+ - command: [/testbin/opkg, list-installed, zlib-dev]
+ environ: *env-def
+ rc: 0
+ out: |
+ zlib-dev - 1.2.11-6
+ err: ''
+
+TestCaseMocks Specifications
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``TestCaseMock`` subclass is free to define the expected data structure.
+
+RunCommandMock Specification
+""""""""""""""""""""""""""""
+
+``RunCommandMock`` mocks can be specified with the key ``run_command`` and it expects a ``list`` in which elements follow the structure:
+
+- ``command: Union[list, str]``
+ Mandatory. The command that is expected to be executed by the module. It corresponds to the parameter ``args`` of the ``AnsibleModule.run_command()`` call.
+ It can be either a list or a string, though the list form is generally recommended.
+- ``environ: dict``
+ Mandatory. All other parameters passed to the ``AnsibleModule.run_command()`` call.
+ Most commonly used are ``environ_update`` and ``check_rc``.
+ Must include all parameters the Ansible module uses in the ``AnsibleModule.run_command()`` call, otherwise the test will fail.
+- ``rc: int``
+ Mandatory. The return code for the command execution.
+ As per usual in bash scripting, a value of ``0`` means success, whereas any other number is an error code.
+- ``out: str``
+ Mandatory. The *stdout* result of the command execution, as one single string containing zero or more lines.
+- ``err: str``
+ Mandatory. The *stderr* result of the command execution, as one single string containing zero or more lines.
+
+
+``UTHelper`` Reference
+^^^^^^^^^^^^^^^^^^^^^^
+
+.. py:module:: .uthelper
+
+ .. py:class:: UTHelper
+
+ A class to encapsulate unit tests.
+
+ .. py:staticmethod:: from_spec(ansible_module, test_module, test_spec, mocks=None)
+
+ Creates an ``UTHelper`` instance from a given test specification.
+
+ :param ansible_module: The Ansible module to be tested.
+ :type ansible_module: :py:class:`types.ModuleType`
+ :param test_module: The test module.
+ :type test_module: :py:class:`types.ModuleType`
+ :param test_spec: The test specification.
+ :type test_spec: dict
+ :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+ :type mocks: list or None
+ :return: An ``UTHelper`` instance.
+ :rtype: UTHelper
+
+ Example usage of ``from_spec()``:
+
+ .. code-block:: python
+
+ import sys
+
+ from ansible_collections.community.general.plugins.modules import ansible_module
+ from .uthelper import UTHelper, RunCommandMock
+
+ TEST_SPEC = dict(
+ test_cases=[
+ ...
+ ]
+ )
+
+ helper = UTHelper.from_spec(ansible_module, sys.modules[__name__], TEST_SPEC, mocks=[RunCommandMock])
+
+ .. py:staticmethod:: from_file(ansible_module, test_module, test_spec_filehandle, mocks=None)
+
+ Creates an ``UTHelper`` instance from a test specification file.
+
+ :param ansible_module: The Ansible module to be tested.
+ :type ansible_module: :py:class:`types.ModuleType`
+ :param test_module: The test module.
+ :type test_module: :py:class:`types.ModuleType`
+ :param test_spec_filehandle: A file handle to a stream providing the test specification in YAML format.
+ :type test_spec_filehandle: ``file-like object``
+ :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+ :type mocks: list or None
+ :return: An ``UTHelper`` instance.
+ :rtype: UTHelper
+
+ Example usage of ``from_file()``:
+
+ .. code-block:: python
+
+ import sys
+
+ from ansible_collections.community.general.plugins.modules import ansible_module
+ from .uthelper import UTHelper, RunCommandMock
+
+ with open("test_spec.yaml", "r") as test_spec_filehandle:
+ helper = UTHelper.from_file(ansible_module, sys.modules[__name__], test_spec_filehandle, mocks=[RunCommandMock])
+
+ .. py:staticmethod:: from_module(ansible_module, test_module_name, mocks=None)
+
+ Creates an ``UTHelper`` instance from a given Ansible module and test module.
+
+ :param ansible_module: The Ansible module to be tested.
+ :type ansible_module: :py:class:`types.ModuleType`
+ :param test_module_name: The name of the test module. It works if passed ``__name__``.
+ :type test_module_name: str
+ :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+ :type mocks: list or None
+ :return: An ``UTHelper`` instance.
+ :rtype: UTHelper
+
+ Example usage of ``from_module()``:
+
+ .. code-block:: python
+
+ from ansible_collections.community.general.plugins.modules import ansible_module
+ from .uthelper import UTHelper, RunCommandMock
+
+ # Example usage
+ helper = UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock])
+
+
+Creating TestCaseMocks
+^^^^^^^^^^^^^^^^^^^^^^
+
+To create a new ``TestCaseMock`` you must extend that class and implement the relevant parts:
+
+.. code-block:: python
+
+ class ShrubberyMock(TestCaseMock):
+ # this name is mandatory, it is the name used in the test specification
+ name = "shrubbery"
+
+ def setup(self, mocker):
+ # perform setup, commonly using mocker to patch some other piece of code
+ ...
+
+ def check(self, test_case, results):
+ # verify the test execution met the expectations of the test case
+ # for example the function was called as many times as it should
+ ...
+
+ def fixtures(self):
+ # returns a dict mapping names to pytest fixtures that should be used for the test case
+ # for example, in RunCommandMock it creates a fixture that patches AnsibleModule.get_bin_path
+ ...
+
+Caveats
+^^^^^^^
+
+Known issues/opportunities for improvement:
+
+* Only one ``UTHelper`` per test module: UTHelper injects a test function with a fixed name into the module's namespace,
+ so placing a second ``UTHelper`` instance is going to overwrite the function created by the first one.
+* Order of elements in a module's namespace is not consistent across executions in Python 3.5, so adding more tests to the test module
+ might make ``UTHelper`` add its function before or after the other test functions.
+ In the community.general collection the CI process uses ``pytest-xdist`` to parallelize and distribute the tests,
+ and it requires the order of the tests to be consistent.
+
+.. versionadded:: 7.5.0
diff --git a/docs/docsite/rst/guide_vardict.rst b/docs/docsite/rst/guide_vardict.rst
new file mode 100644
index 0000000000..1beef0c57f
--- /dev/null
+++ b/docs/docsite/rst/guide_vardict.rst
@@ -0,0 +1,176 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_vardict:
+
+VarDict Guide
+=============
+
+Introduction
+^^^^^^^^^^^^
+
+The ``ansible_collections.community.general.plugins.module_utils.vardict`` module util provides the
+``VarDict`` class to help manage the module variables. That class is a container for module variables,
+especially the ones for which the module must keep track of state changes, and the ones that should
+be published as return values.
+
+Each variable has extra behaviors controlled by associated metadata, simplifying the generation of
+output values from the module.
+
+Quickstart
+""""""""""
+
+The simplest way of using ``VarDict`` is:
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.vardict import VarDict
+
+Then in ``main()``, or any other function called from there:
+
+.. code-block:: python
+
+ vars = VarDict()
+
+ # Next 3 statements are equivalent
+ vars.abc = 123
+ vars["abc"] = 123
+ vars.set("abc", 123)
+
+ vars.xyz = "bananas"
+ vars.ghi = False
+
+And by the time the module is about to exit:
+
+.. code-block:: python
+
+ results = vars.output()
+ module.exit_json(**results)
+
+That makes the return value of the module:
+
+.. code-block:: json
+
+ {
+ "abc": 123,
+ "xyz": "bananas",
+ "ghi": false
+ }
+
+Metadata
+""""""""
+
+The metadata values associated with each variable are:
+
+- ``output: bool`` - marks the variable for module output as a module return value.
+- ``fact: bool`` - marks the variable for module output as an Ansible fact.
+- ``verbosity: int`` - sets the minimum level of verbosity for which the variable will be included in the output.
+- ``change: bool`` - controls the detection of changes in the variable value.
+- ``initial_value: any`` - when using ``change`` and you need to forcefully set an initial value for the variable.
+- ``diff: bool`` - used along with ``change``, this generates an Ansible-style diff ``dict``.
+
+See the sections below for more details on how to use the metadata.
+
+
+Using VarDict
+^^^^^^^^^^^^^
+
+Basic Usage
+"""""""""""
+
+As shown above, variables can be accessed using the ``[]`` operator, as in a ``dict`` object,
+and also as an object attribute, such as ``vars.abc``. The form using the ``set()``
+method is special in the sense that you can use it to set metadata values:
+
+.. code-block:: python
+
+ vars.set("abc", 123, output=False)
+ vars.set("abc", 123, output=True, change=True)
+
+Another way to set metadata after the variables have been created is:
+
+.. code-block:: python
+
+ vars.set_meta("abc", output=False)
+ vars.set_meta("abc", output=True, change=True, diff=True)
+
+You can use either operator and attribute forms to access the value of the variable. Other ways to
+access its value and its metadata are:
+
+.. code-block:: python
+
+ print("abc value = {0}".format(vars.var("abc")["value"])) # get the value
+ print("abc output? {0}".format(vars.get_meta("abc")["output"])) # get the metadata like this
+
+The names of methods, such as ``set``, ``get_meta``, ``output`` amongst others, are reserved and
+cannot be used as variable names. If you try to use a reserved name a ``ValueError`` exception
+is raised with the message "Name is reserved".
+
+Generating output
+"""""""""""""""""
+
+By default, every variable created will be enabled for output with minimum verbosity set to zero; in
+other words, they will always be in the output by default.
+
+You can control that when creating the variable for the first time or later in the code:
+
+.. code-block:: python
+
+ vars.set("internal", x + 4, output=False)
+ vars.set_meta("internal", output=False)
+
+You can also set the verbosity of some variable, like:
+
+.. code-block:: python
+
+ vars.set("abc", x + 4)
+ vars.set("debug_x", x, verbosity=3)
+
+ results = vars.output(module._verbosity)
+ module.exit_json(**results)
+
+If the module was invoked with verbosity lower than 3, then the output will only contain
+the variable ``abc``. If running at higher verbosity, as in ``ansible-playbook -vvv``,
+then the output will also contain ``debug_x``.
+
+Generating facts is very similar to regular output, but variables are not marked as facts by default.
+
+.. code-block:: python
+
+ vars.set("modulefact", x + 4, fact=True)
+ vars.set("debugfact", x, fact=True, verbosity=3)
+
+ results = vars.output(module._verbosity)
+ results["ansible_facts"] = {"module_name": vars.facts(module._verbosity)}
+ module.exit_json(**results)
+
+Handling change
+"""""""""""""""
+
+You can use ``VarDict`` to determine whether variables have had their values changed.
+
+.. code-block:: python
+
+ vars.set("abc", 42, change=True)
+ vars.abc = 90
+
+ results = vars.output()
+ results["changed"] = vars.has_changed
+ module.exit_json(**results)
+
+If tracking changes in variables, you may want to present the difference between the initial and the final
+values of it. For that, you want to use:
+
+.. code-block:: python
+
+ vars.set("abc", 42, change=True, diff=True)
+ vars.abc = 90
+
+ results = vars.output()
+ results["changed"] = vars.has_changed
+ results["diff"] = vars.diff()
+ module.exit_json(**results)
+
+.. versionadded:: 7.1.0
diff --git a/docs/docsite/rst/test_guide.rst b/docs/docsite/rst/test_guide.rst
index b0b7885f9b..a1f5723df4 100644
--- a/docs/docsite/rst/test_guide.rst
+++ b/docs/docsite/rst/test_guide.rst
@@ -8,14 +8,14 @@
community.general Test (Plugin) Guide
=====================================
-The :ref:`community.general collection ` offers currently one test plugin.
+The :anscollection:`community.general collection <community.general>` currently offers one test plugin.
.. contents:: Topics
Feature Tests
-------------
-The ``a_module`` test allows to check whether a given string refers to an existing module or action plugin. This can be useful in roles, which can use this to ensure that required modules are present ahead of time.
+The :ansplugin:`community.general.a_module test <community.general.a_module#test>` allows you to check whether a given string refers to an existing module or action plugin. This can be useful in roles, which can use this to ensure that required modules are present ahead of time.
.. code-block:: yaml+jinja
diff --git a/galaxy.yml b/galaxy.yml
index e2f868d53b..0288625dbb 100644
--- a/galaxy.yml
+++ b/galaxy.yml
@@ -5,17 +5,17 @@
namespace: community
name: general
-version: 5.5.0
+version: 12.0.0
readme: README.md
authors:
- Ansible (https://github.com/ansible)
-description: null
+description: >-
+ The community.general collection is a part of the Ansible package and includes many modules and
+ plugins supported by Ansible community which are not part of more specialized community collections.
license_file: COPYING
-tags: [community]
-# NOTE: No dependencies are expected to be added here
-# dependencies:
+tags:
+ - community
repository: https://github.com/ansible-collections/community.general
documentation: https://docs.ansible.com/ansible/latest/collections/community/general/
homepage: https://github.com/ansible-collections/community.general
issues: https://github.com/ansible-collections/community.general/issues
-#type: flatmap
diff --git a/meta/runtime.yml b/meta/runtime.yml
index cbb131346d..d2be5a89c1 100644
--- a/meta/runtime.yml
+++ b/meta/runtime.yml
@@ -3,18 +3,132 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-requires_ansible: '>=2.11.0'
+requires_ansible: '>=2.17.0'
+action_groups:
+ consul:
+ - consul_agent_check
+ - consul_agent_service
+ - consul_auth_method
+ - consul_binding_rule
+ - consul_policy
+ - consul_role
+ - consul_session
+ - consul_token
+ proxmox:
+ - metadata:
+ extend_group:
+ - community.proxmox.proxmox
+ keycloak:
+ - keycloak_authentication
+ - keycloak_authentication_required_actions
+ - keycloak_authz_authorization_scope
+ - keycloak_authz_custom_policy
+ - keycloak_authz_permission
+ - keycloak_authz_permission_info
+ - keycloak_client
+ - keycloak_client_rolemapping
+ - keycloak_client_rolescope
+ - keycloak_clientscope
+ - keycloak_clientscope_type
+ - keycloak_clientsecret_info
+ - keycloak_clientsecret_regenerate
+ - keycloak_clienttemplate
+ - keycloak_component
+ - keycloak_component_info
+ - keycloak_group
+ - keycloak_identity_provider
+ - keycloak_realm
+ - keycloak_realm_key
+ - keycloak_realm_keys_metadata_info
+ - keycloak_realm_rolemapping
+ - keycloak_role
+ - keycloak_user
+ - keycloak_user_federation
+ - keycloak_user_rolemapping
+ - keycloak_userprofile
+ scaleway:
+ - scaleway_compute
+ - scaleway_compute_private_network
+ - scaleway_container
+ - scaleway_container_info
+ - scaleway_container_namespace
+ - scaleway_container_namespace_info
+ - scaleway_container_registry
+ - scaleway_container_registry_info
+ - scaleway_database_backup
+ - scaleway_function
+ - scaleway_function_info
+ - scaleway_function_namespace
+ - scaleway_function_namespace_info
+ - scaleway_image_info
+ - scaleway_ip
+ - scaleway_ip_info
+ - scaleway_lb
+ - scaleway_organization_info
+ - scaleway_private_network
+ - scaleway_security_group
+ - scaleway_security_group_info
+ - scaleway_security_group_rule
+ - scaleway_server_info
+ - scaleway_snapshot_info
+ - scaleway_sshkey
+ - scaleway_user_data
+ - scaleway_volume
+ - scaleway_volume_info
+
plugin_routing:
+ callback:
+ actionable:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use the 'default' callback plugin with 'display_skipped_hosts
+ = no' and 'display_ok_hosts = no' options.
+ full_skip:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use the 'default' callback plugin with 'display_skipped_hosts
+ = no' option.
+ hipchat:
+ tombstone:
+ removal_version: 10.0.0
+ warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020.
+ osx_say:
+ redirect: community.general.say
+ stderr:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use the 'default' callback plugin with 'display_failed_stderr
+ = yes' option.
+ yaml:
+ tombstone:
+ removal_version: 12.0.0
+ warning_text: >-
+ The plugin has been superseded by the option `result_format=yaml` in callback plugin ansible.builtin.default from ansible-core 2.13 onwards.
connection:
docker:
redirect: community.docker.docker
oc:
redirect: community.okd.oc
+ proxmox_pct_remote:
+ redirect: community.proxmox.proxmox_pct_remote
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
lookup:
gcp_storage_file:
redirect: community.google.gcp_storage_file
hashi_vault:
redirect: community.hashi_vault.hashi_vault
+ hiera:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: >-
+ Hiera has been deprecated a long time ago.
+ If you disagree with this deprecation, please create an issue in the community.general repository.
+ manifold:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Company was acquired in 2021 and service was ceased afterwards.
nios:
redirect: infoblox.nios_modules.nios_lookup
nios_next_ip:
@@ -22,160 +136,72 @@ plugin_routing:
nios_next_network:
redirect: infoblox.nios_modules.nios_next_network
modules:
- aerospike_migrations:
- redirect: community.general.database.aerospike.aerospike_migrations
- airbrake_deployment:
- redirect: community.general.monitoring.airbrake_deployment
- aix_devices:
- redirect: community.general.system.aix_devices
- aix_filesystem:
- redirect: community.general.system.aix_filesystem
- aix_inittab:
- redirect: community.general.system.aix_inittab
- aix_lvg:
- redirect: community.general.system.aix_lvg
- aix_lvol:
- redirect: community.general.system.aix_lvol
- alerta_customer:
- redirect: community.general.monitoring.alerta_customer
- ali_instance:
- redirect: community.general.cloud.alicloud.ali_instance
ali_instance_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.ali_instance_info instead.
- ali_instance_info:
- redirect: community.general.cloud.alicloud.ali_instance_info
- alternatives:
- redirect: community.general.system.alternatives
- ansible_galaxy_install:
- redirect: community.general.packaging.language.ansible_galaxy_install
- apache2_mod_proxy:
- redirect: community.general.web_infrastructure.apache2_mod_proxy
- apache2_module:
- redirect: community.general.web_infrastructure.apache2_module
- apk:
- redirect: community.general.packaging.os.apk
- apt_repo:
- redirect: community.general.packaging.os.apt_repo
- apt_rpm:
- redirect: community.general.packaging.os.apt_rpm
- archive:
- redirect: community.general.files.archive
atomic_container:
- redirect: community.general.cloud.atomic.atomic_container
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Project Atomic was sunset by the end of 2019.
atomic_host:
- redirect: community.general.cloud.atomic.atomic_host
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Project Atomic was sunset by the end of 2019.
atomic_image:
- redirect: community.general.cloud.atomic.atomic_image
- awall:
- redirect: community.general.system.awall
- beadm:
- redirect: community.general.system.beadm
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Project Atomic was sunset by the end of 2019.
bearychat:
- redirect: community.general.notification.bearychat
- bigpanda:
- redirect: community.general.monitoring.bigpanda
- bitbucket_access_key:
- redirect: community.general.source_control.bitbucket.bitbucket_access_key
- bitbucket_pipeline_key_pair:
- redirect: community.general.source_control.bitbucket.bitbucket_pipeline_key_pair
- bitbucket_pipeline_known_host:
- redirect: community.general.source_control.bitbucket.bitbucket_pipeline_known_host
- bitbucket_pipeline_variable:
- redirect: community.general.source_control.bitbucket.bitbucket_pipeline_variable
- bower:
- redirect: community.general.packaging.language.bower
- bundler:
- redirect: community.general.packaging.language.bundler
- bzr:
- redirect: community.general.source_control.bzr
- campfire:
- redirect: community.general.notification.campfire
- capabilities:
- redirect: community.general.system.capabilities
- cargo:
- redirect: community.general.packaging.language.cargo
+ tombstone:
+ removal_version: 12.0.0
+ warning_text: Chat service is no longer available.
catapult:
- redirect: community.general.notification.catapult
- circonus_annotation:
- redirect: community.general.monitoring.circonus_annotation
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: DNS fails to resolve the API endpoint used by the module since Oct 2024. See https://github.com/ansible-collections/community.general/issues/10318 for details.
cisco_spark:
- redirect: community.general.notification.cisco_spark
- cisco_webex:
- redirect: community.general.notification.cisco_webex
- clc_aa_policy:
- redirect: community.general.cloud.centurylink.clc_aa_policy
+ redirect: community.general.cisco_webex
clc_alert_policy:
- redirect: community.general.cloud.centurylink.clc_alert_policy
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_blueprint_package:
- redirect: community.general.cloud.centurylink.clc_blueprint_package
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_firewall_policy:
- redirect: community.general.cloud.centurylink.clc_firewall_policy
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_group:
- redirect: community.general.cloud.centurylink.clc_group
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_loadbalancer:
- redirect: community.general.cloud.centurylink.clc_loadbalancer
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_modify_server:
- redirect: community.general.cloud.centurylink.clc_modify_server
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_publicip:
- redirect: community.general.cloud.centurylink.clc_publicip
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_server:
- redirect: community.general.cloud.centurylink.clc_server
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_server_snapshot:
- redirect: community.general.cloud.centurylink.clc_server_snapshot
- cloud_init_data_facts:
- redirect: community.general.cloud.misc.cloud_init_data_facts
- cloudflare_dns:
- redirect: community.general.net_tools.cloudflare_dns
- cobbler_sync:
- redirect: community.general.remote_management.cobbler.cobbler_sync
- cobbler_system:
- redirect: community.general.remote_management.cobbler.cobbler_system
- composer:
- redirect: community.general.packaging.language.composer
- consul:
- redirect: community.general.clustering.consul.consul
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: CenturyLink Cloud services went EOL in September 2023.
consul_acl:
- redirect: community.general.clustering.consul.consul_acl
- consul_kv:
- redirect: community.general.clustering.consul.consul_kv
- consul_session:
- redirect: community.general.clustering.consul.consul_session
- copr:
- redirect: community.general.packaging.os.copr
- cpanm:
- redirect: community.general.packaging.language.cpanm
- cronvar:
- redirect: community.general.system.cronvar
- crypttab:
- redirect: community.general.system.crypttab
- datadog_downtime:
- redirect: community.general.monitoring.datadog.datadog_downtime
- datadog_event:
- redirect: community.general.monitoring.datadog.datadog_event
- datadog_monitor:
- redirect: community.general.monitoring.datadog.datadog_monitor
- dconf:
- redirect: community.general.system.dconf
- deploy_helper:
- redirect: community.general.web_infrastructure.deploy_helper
- dimensiondata_network:
- redirect: community.general.cloud.dimensiondata.dimensiondata_network
- dimensiondata_vlan:
- redirect: community.general.cloud.dimensiondata.dimensiondata_vlan
- discord:
- redirect: community.general.notification.discord
- django_manage:
- redirect: community.general.web_infrastructure.django_manage
- dnf_versionlock:
- redirect: community.general.packaging.os.dnf_versionlock
- dnsimple:
- redirect: community.general.net_tools.dnsimple
- dnsimple_info:
- redirect: community.general.net_tools.dnsimple_info
- dnsmadeeasy:
- redirect: community.general.net_tools.dnsmadeeasy
+ tombstone:
+ removal_version: 10.0.0
+ warning_text: Use community.general.consul_token and/or community.general.consul_policy instead.
docker_compose:
redirect: community.docker.docker_compose
docker_config:
@@ -230,36 +256,19 @@ plugin_routing:
redirect: community.docker.docker_volume
docker_volume_info:
redirect: community.docker.docker_volume_info
- dpkg_divert:
- redirect: community.general.system.dpkg_divert
- easy_install:
- redirect: community.general.packaging.language.easy_install
- ejabberd_user:
- redirect: community.general.web_infrastructure.ejabberd_user
- elasticsearch_plugin:
- redirect: community.general.database.misc.elasticsearch_plugin
- emc_vnx_sg_member:
- redirect: community.general.storage.emc.emc_vnx_sg_member
- etcd3:
- redirect: community.general.clustering.etcd3
facter:
- redirect: community.general.system.facter
- filesize:
- redirect: community.general.files.filesize
- filesystem:
- redirect: community.general.system.filesystem
- flatpak:
- redirect: community.general.packaging.os.flatpak
- flatpak_remote:
- redirect: community.general.packaging.os.flatpak_remote
+ tombstone:
+ removal_version: 12.0.0
+ warning_text: Use community.general.facter_facts instead.
flowdock:
- redirect: community.general.notification.flowdock
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on HTTPS APIs that do not exist anymore and
+ there is no clear path to update.
foreman:
tombstone:
removal_version: 2.0.0
warning_text: Use the modules from the theforeman.foreman collection instead.
- gandi_livedns:
- redirect: community.general.net_tools.gandi_livedns
gc_storage:
redirect: community.google.gc_storage
gcdns_record:
@@ -294,10 +303,6 @@ plugin_routing:
redirect: community.google.gce_snapshot
gce_tag:
redirect: community.google.gce_tag
- gconftool2:
- redirect: community.general.system.gconftool2
- gconftool2_info:
- redirect: community.general.system.gconftool2_info
gcp_backend_service:
tombstone:
removal_version: 2.0.0
@@ -333,63 +338,13 @@ plugin_routing:
removal_version: 2.0.0
warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance
instead.
- gem:
- redirect: community.general.packaging.language.gem
- git_config:
- redirect: community.general.source_control.git_config
- github_deploy_key:
- redirect: community.general.source_control.github.github_deploy_key
github_hooks:
tombstone:
removal_version: 2.0.0
warning_text: Use community.general.github_webhook and community.general.github_webhook_info
instead.
- github_issue:
- redirect: community.general.source_control.github.github_issue
- github_key:
- redirect: community.general.source_control.github.github_key
- github_release:
- redirect: community.general.source_control.github.github_release
- github_repo:
- redirect: community.general.source_control.github.github_repo
- github_webhook:
- redirect: community.general.source_control.github.github_webhook
- github_webhook_info:
- redirect: community.general.source_control.github.github_webhook_info
- gitlab_branch:
- redirect: community.general.source_control.gitlab.gitlab_branch
- gitlab_deploy_key:
- redirect: community.general.source_control.gitlab.gitlab_deploy_key
- gitlab_group:
- redirect: community.general.source_control.gitlab.gitlab_group
- gitlab_group_members:
- redirect: community.general.source_control.gitlab.gitlab_group_members
- gitlab_group_variable:
- redirect: community.general.source_control.gitlab.gitlab_group_variable
- gitlab_hook:
- redirect: community.general.source_control.gitlab.gitlab_hook
- gitlab_project:
- redirect: community.general.source_control.gitlab.gitlab_project
- gitlab_project_members:
- redirect: community.general.source_control.gitlab.gitlab_project_members
- gitlab_project_variable:
- redirect: community.general.source_control.gitlab.gitlab_project_variable
- gitlab_protected_branch:
- redirect: community.general.source_control.gitlab.gitlab_protected_branch
- gitlab_runner:
- redirect: community.general.source_control.gitlab.gitlab_runner
- gitlab_user:
- redirect: community.general.source_control.gitlab.gitlab_user
- grove:
- redirect: community.general.notification.grove
- gunicorn:
- redirect: community.general.web_infrastructure.gunicorn
hana_query:
- redirect: community.general.database.saphana.hana_query
- haproxy:
- redirect: community.general.net_tools.haproxy
- heroku_collaborator:
- redirect: community.general.cloud.heroku.heroku_collaborator
+ redirect: community.sap_libs.sap_hdbsql
hetzner_failover_ip:
redirect: community.hrobot.failover_ip
hetzner_failover_ip_info:
@@ -398,226 +353,30 @@ plugin_routing:
redirect: community.hrobot.firewall
hetzner_firewall_info:
redirect: community.hrobot.firewall_info
- hg:
- redirect: community.general.source_control.hg
hipchat:
- redirect: community.general.notification.hipchat
- homebrew:
- redirect: community.general.packaging.os.homebrew
- homebrew_cask:
- redirect: community.general.packaging.os.homebrew_cask
- homebrew_tap:
- redirect: community.general.packaging.os.homebrew_tap
- homectl:
- redirect: community.general.system.homectl
- honeybadger_deployment:
- redirect: community.general.monitoring.honeybadger_deployment
- hpilo_boot:
- redirect: community.general.remote_management.hpilo.hpilo_boot
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020.
hpilo_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.hpilo_info instead.
- hpilo_info:
- redirect: community.general.remote_management.hpilo.hpilo_info
- hponcfg:
- redirect: community.general.remote_management.hpilo.hponcfg
- htpasswd:
- redirect: community.general.web_infrastructure.htpasswd
- hwc_ecs_instance:
- redirect: community.general.cloud.huawei.hwc_ecs_instance
- hwc_evs_disk:
- redirect: community.general.cloud.huawei.hwc_evs_disk
- hwc_network_vpc:
- redirect: community.general.cloud.huawei.hwc_network_vpc
- hwc_smn_topic:
- redirect: community.general.cloud.huawei.hwc_smn_topic
- hwc_vpc_eip:
- redirect: community.general.cloud.huawei.hwc_vpc_eip
- hwc_vpc_peering_connect:
- redirect: community.general.cloud.huawei.hwc_vpc_peering_connect
- hwc_vpc_port:
- redirect: community.general.cloud.huawei.hwc_vpc_port
- hwc_vpc_private_ip:
- redirect: community.general.cloud.huawei.hwc_vpc_private_ip
- hwc_vpc_route:
- redirect: community.general.cloud.huawei.hwc_vpc_route
- hwc_vpc_security_group:
- redirect: community.general.cloud.huawei.hwc_vpc_security_group
- hwc_vpc_security_group_rule:
- redirect: community.general.cloud.huawei.hwc_vpc_security_group_rule
- hwc_vpc_subnet:
- redirect: community.general.cloud.huawei.hwc_vpc_subnet
- ibm_sa_domain:
- redirect: community.general.storage.ibm.ibm_sa_domain
- ibm_sa_host:
- redirect: community.general.storage.ibm.ibm_sa_host
- ibm_sa_host_ports:
- redirect: community.general.storage.ibm.ibm_sa_host_ports
- ibm_sa_pool:
- redirect: community.general.storage.ibm.ibm_sa_pool
- ibm_sa_vol:
- redirect: community.general.storage.ibm.ibm_sa_vol
- ibm_sa_vol_map:
- redirect: community.general.storage.ibm.ibm_sa_vol_map
- icinga2_feature:
- redirect: community.general.monitoring.icinga2_feature
- icinga2_host:
- redirect: community.general.monitoring.icinga2_host
idrac_firmware:
redirect: dellemc.openmanage.idrac_firmware
- idrac_redfish_command:
- redirect: community.general.remote_management.redfish.idrac_redfish_command
- idrac_redfish_config:
- redirect: community.general.remote_management.redfish.idrac_redfish_config
idrac_redfish_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.idrac_redfish_info instead.
- idrac_redfish_info:
- redirect: community.general.remote_management.redfish.idrac_redfish_info
idrac_server_config_profile:
redirect: dellemc.openmanage.idrac_server_config_profile
- ilo_redfish_config:
- redirect: community.general.remote_management.redfish.ilo_redfish_config
- ilo_redfish_info:
- redirect: community.general.remote_management.redfish.ilo_redfish_info
- imc_rest:
- redirect: community.general.remote_management.imc.imc_rest
- imgadm:
- redirect: community.general.cloud.smartos.imgadm
- infinity:
- redirect: community.general.net_tools.infinity.infinity
- influxdb_database:
- redirect: community.general.database.influxdb.influxdb_database
- influxdb_query:
- redirect: community.general.database.influxdb.influxdb_query
- influxdb_retention_policy:
- redirect: community.general.database.influxdb.influxdb_retention_policy
- influxdb_user:
- redirect: community.general.database.influxdb.influxdb_user
- influxdb_write:
- redirect: community.general.database.influxdb.influxdb_write
- ini_file:
- redirect: community.general.files.ini_file
- installp:
- redirect: community.general.packaging.os.installp
- interfaces_file:
- redirect: community.general.system.interfaces_file
- ip_netns:
- redirect: community.general.net_tools.ip_netns
- ipa_config:
- redirect: community.general.identity.ipa.ipa_config
- ipa_dnsrecord:
- redirect: community.general.identity.ipa.ipa_dnsrecord
- ipa_dnszone:
- redirect: community.general.identity.ipa.ipa_dnszone
- ipa_group:
- redirect: community.general.identity.ipa.ipa_group
- ipa_hbacrule:
- redirect: community.general.identity.ipa.ipa_hbacrule
- ipa_host:
- redirect: community.general.identity.ipa.ipa_host
- ipa_hostgroup:
- redirect: community.general.identity.ipa.ipa_hostgroup
- ipa_otpconfig:
- redirect: community.general.identity.ipa.ipa_otpconfig
- ipa_otptoken:
- redirect: community.general.identity.ipa.ipa_otptoken
- ipa_pwpolicy:
- redirect: community.general.identity.ipa.ipa_pwpolicy
- ipa_role:
- redirect: community.general.identity.ipa.ipa_role
- ipa_service:
- redirect: community.general.identity.ipa.ipa_service
- ipa_subca:
- redirect: community.general.identity.ipa.ipa_subca
- ipa_sudocmd:
- redirect: community.general.identity.ipa.ipa_sudocmd
- ipa_sudocmdgroup:
- redirect: community.general.identity.ipa.ipa_sudocmdgroup
- ipa_sudorule:
- redirect: community.general.identity.ipa.ipa_sudorule
- ipa_user:
- redirect: community.general.identity.ipa.ipa_user
- ipa_vault:
- redirect: community.general.identity.ipa.ipa_vault
- ipify_facts:
- redirect: community.general.net_tools.ipify_facts
- ipinfoio_facts:
- redirect: community.general.net_tools.ipinfoio_facts
- ipmi_boot:
- redirect: community.general.remote_management.ipmi.ipmi_boot
- ipmi_power:
- redirect: community.general.remote_management.ipmi.ipmi_power
- iptables_state:
- redirect: community.general.system.iptables_state
- ipwcli_dns:
- redirect: community.general.net_tools.ipwcli_dns
- irc:
- redirect: community.general.notification.irc
- iso_create:
- redirect: community.general.files.iso_create
- iso_extract:
- redirect: community.general.files.iso_extract
- jabber:
- redirect: community.general.notification.jabber
- java_cert:
- redirect: community.general.system.java_cert
- java_keystore:
- redirect: community.general.system.java_keystore
- jboss:
- redirect: community.general.web_infrastructure.jboss
- jenkins_build:
- redirect: community.general.web_infrastructure.jenkins_build
- jenkins_job:
- redirect: community.general.web_infrastructure.jenkins_job
jenkins_job_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.jenkins_job_info instead.
- jenkins_job_info:
- redirect: community.general.web_infrastructure.jenkins_job_info
- jenkins_plugin:
- redirect: community.general.web_infrastructure.jenkins_plugin
- jenkins_script:
- redirect: community.general.web_infrastructure.jenkins_script
- jira:
- redirect: community.general.web_infrastructure.jira
katello:
tombstone:
removal_version: 2.0.0
warning_text: Use the modules from the theforeman.foreman collection instead.
- kernel_blacklist:
- redirect: community.general.system.kernel_blacklist
- keycloak_authentication:
- redirect: community.general.identity.keycloak.keycloak_authentication
- keycloak_client:
- redirect: community.general.identity.keycloak.keycloak_client
- keycloak_client_rolemapping:
- redirect: community.general.identity.keycloak.keycloak_client_rolemapping
- keycloak_clientscope:
- redirect: community.general.identity.keycloak.keycloak_clientscope
- keycloak_clienttemplate:
- redirect: community.general.identity.keycloak.keycloak_clienttemplate
- keycloak_group:
- redirect: community.general.identity.keycloak.keycloak_group
- keycloak_identity_provider:
- redirect: community.general.identity.keycloak.keycloak_identity_provider
- keycloak_realm:
- redirect: community.general.identity.keycloak.keycloak_realm
- keycloak_realm_info:
- redirect: community.general.identity.keycloak.keycloak_realm_info
- keycloak_role:
- redirect: community.general.identity.keycloak.keycloak_role
- keycloak_user_federation:
- redirect: community.general.identity.keycloak.keycloak_user_federation
- keyring:
- redirect: community.general.system.keyring
- keyring_info:
- redirect: community.general.system.keyring_info
- kibana_plugin:
- redirect: community.general.database.misc.kibana_plugin
kubevirt_cdi_upload:
redirect: community.kubevirt.kubevirt_cdi_upload
kubevirt_preset:
@@ -630,40 +389,10 @@ plugin_routing:
redirect: community.kubevirt.kubevirt_template
kubevirt_vm:
redirect: community.kubevirt.kubevirt_vm
- launchd:
- redirect: community.general.system.launchd
- layman:
- redirect: community.general.packaging.os.layman
- lbu:
- redirect: community.general.system.lbu
ldap_attr:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.ldap_attrs instead.
- ldap_attrs:
- redirect: community.general.net_tools.ldap.ldap_attrs
- ldap_entry:
- redirect: community.general.net_tools.ldap.ldap_entry
- ldap_passwd:
- redirect: community.general.net_tools.ldap.ldap_passwd
- ldap_search:
- redirect: community.general.net_tools.ldap.ldap_search
- librato_annotation:
- redirect: community.general.monitoring.librato_annotation
- linode:
- redirect: community.general.cloud.linode.linode
- linode_v4:
- redirect: community.general.cloud.linode.linode_v4
- listen_ports_facts:
- redirect: community.general.system.listen_ports_facts
- lldp:
- redirect: community.general.net_tools.lldp
- locale_gen:
- redirect: community.general.system.locale_gen
- logentries:
- redirect: community.general.monitoring.logentries
- logentries_msg:
- redirect: community.general.notification.logentries_msg
logicmonitor:
tombstone:
removal_version: 1.0.0
@@ -674,86 +403,14 @@ plugin_routing:
removal_version: 1.0.0
warning_text: The logicmonitor_facts module is no longer maintained and the
API used has been disabled in 2017.
- logstash_plugin:
- redirect: community.general.monitoring.logstash_plugin
- lvg:
- redirect: community.general.system.lvg
- lvol:
- redirect: community.general.system.lvol
- lxc_container:
- redirect: community.general.cloud.lxc.lxc_container
- lxca_cmms:
- redirect: community.general.remote_management.lxca.lxca_cmms
- lxca_nodes:
- redirect: community.general.remote_management.lxca.lxca_nodes
- lxd_container:
- redirect: community.general.cloud.lxd.lxd_container
- lxd_profile:
- redirect: community.general.cloud.lxd.lxd_profile
- lxd_project:
- redirect: community.general.cloud.lxd.lxd_project
- macports:
- redirect: community.general.packaging.os.macports
- mail:
- redirect: community.general.notification.mail
- make:
- redirect: community.general.system.make
- manageiq_alert_profiles:
- redirect: community.general.remote_management.manageiq.manageiq_alert_profiles
- manageiq_alerts:
- redirect: community.general.remote_management.manageiq.manageiq_alerts
- manageiq_group:
- redirect: community.general.remote_management.manageiq.manageiq_group
- manageiq_policies:
- redirect: community.general.remote_management.manageiq.manageiq_policies
- manageiq_provider:
- redirect: community.general.remote_management.manageiq.manageiq_provider
- manageiq_tags:
- redirect: community.general.remote_management.manageiq.manageiq_tags
- manageiq_tenant:
- redirect: community.general.remote_management.manageiq.manageiq_tenant
- manageiq_user:
- redirect: community.general.remote_management.manageiq.manageiq_user
- mas:
- redirect: community.general.packaging.os.mas
- matrix:
- redirect: community.general.notification.matrix
- mattermost:
- redirect: community.general.notification.mattermost
- maven_artifact:
- redirect: community.general.packaging.language.maven_artifact
- memset_dns_reload:
- redirect: community.general.cloud.memset.memset_dns_reload
memset_memstore_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.memset_memstore_info instead.
- memset_memstore_info:
- redirect: community.general.cloud.memset.memset_memstore_info
memset_server_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.memset_server_info instead.
- memset_server_info:
- redirect: community.general.cloud.memset.memset_server_info
- memset_zone:
- redirect: community.general.cloud.memset.memset_zone
- memset_zone_domain:
- redirect: community.general.cloud.memset.memset_zone_domain
- memset_zone_record:
- redirect: community.general.cloud.memset.memset_zone_record
- mksysb:
- redirect: community.general.system.mksysb
- modprobe:
- redirect: community.general.system.modprobe
- monit:
- redirect: community.general.monitoring.monit
- mqtt:
- redirect: community.general.notification.mqtt
- mssql_db:
- redirect: community.general.database.mssql.mssql_db
- mssql_script:
- redirect: community.general.database.mssql.mssql_script
na_cdot_aggregate:
tombstone:
removal_version: 2.0.0
@@ -790,22 +447,10 @@ plugin_routing:
tombstone:
removal_version: 3.0.0
warning_text: Use netapp.ontap.na_ontap_info instead.
- nagios:
- redirect: community.general.monitoring.nagios
- netcup_dns:
- redirect: community.general.net_tools.netcup_dns
- newrelic_deployment:
- redirect: community.general.monitoring.newrelic_deployment
- nexmo:
- redirect: community.general.notification.nexmo
nginx_status_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.nginx_status_info instead.
- nginx_status_info:
- redirect: community.general.web_infrastructure.nginx_status_info
- nictagadm:
- redirect: community.general.cloud.smartos.nictagadm
nios_a_record:
redirect: infoblox.nios_modules.nios_a_record
nios_aaaa_record:
@@ -838,157 +483,61 @@ plugin_routing:
redirect: infoblox.nios_modules.nios_txt_record
nios_zone:
redirect: infoblox.nios_modules.nios_zone
- nmcli:
- redirect: community.general.net_tools.nmcli
- nomad_job:
- redirect: community.general.clustering.nomad.nomad_job
- nomad_job_info:
- redirect: community.general.clustering.nomad.nomad_job_info
- nosh:
- redirect: community.general.system.nosh
- notification.cisco_spark:
- redirect: community.general.notification.cisco_webex
- npm:
- redirect: community.general.packaging.language.npm
- nsupdate:
- redirect: community.general.net_tools.nsupdate
oci_vcn:
- redirect: community.general.cloud.oracle.oci_vcn
- odbc:
- redirect: community.general.database.misc.odbc
- office_365_connector_card:
- redirect: community.general.notification.office_365_connector_card
- ohai:
- redirect: community.general.system.ohai
- omapi_host:
- redirect: community.general.net_tools.omapi_host
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Use oracle.oci.oci_network_vcn instead.
ome_device_info:
redirect: dellemc.openmanage.ome_device_info
- one_host:
- redirect: community.general.cloud.opennebula.one_host
- one_image:
- redirect: community.general.cloud.opennebula.one_image
one_image_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.one_image_info instead.
- one_image_info:
- redirect: community.general.cloud.opennebula.one_image_info
- one_service:
- redirect: community.general.cloud.opennebula.one_service
- one_template:
- redirect: community.general.cloud.opennebula.one_template
- one_vm:
- redirect: community.general.cloud.opennebula.one_vm
- oneandone_firewall_policy:
- redirect: community.general.cloud.oneandone.oneandone_firewall_policy
- oneandone_load_balancer:
- redirect: community.general.cloud.oneandone.oneandone_load_balancer
- oneandone_monitoring_policy:
- redirect: community.general.cloud.oneandone.oneandone_monitoring_policy
- oneandone_private_network:
- redirect: community.general.cloud.oneandone.oneandone_private_network
- oneandone_public_ip:
- redirect: community.general.cloud.oneandone.oneandone_public_ip
- oneandone_server:
- redirect: community.general.cloud.oneandone.oneandone_server
onepassword_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.onepassword_info instead.
- onepassword_info:
- redirect: community.general.identity.onepassword_info
oneview_datacenter_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.oneview_datacenter_info instead.
- oneview_datacenter_info:
- redirect: community.general.remote_management.oneview.oneview_datacenter_info
oneview_enclosure_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.oneview_enclosure_info instead.
- oneview_enclosure_info:
- redirect: community.general.remote_management.oneview.oneview_enclosure_info
- oneview_ethernet_network:
- redirect: community.general.remote_management.oneview.oneview_ethernet_network
oneview_ethernet_network_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.oneview_ethernet_network_info instead.
- oneview_ethernet_network_info:
- redirect: community.general.remote_management.oneview.oneview_ethernet_network_info
- oneview_fc_network:
- redirect: community.general.remote_management.oneview.oneview_fc_network
oneview_fc_network_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.oneview_fc_network_info instead.
- oneview_fc_network_info:
- redirect: community.general.remote_management.oneview.oneview_fc_network_info
- oneview_fcoe_network:
- redirect: community.general.remote_management.oneview.oneview_fcoe_network
oneview_fcoe_network_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.oneview_fcoe_network_info instead.
- oneview_fcoe_network_info:
- redirect: community.general.remote_management.oneview.oneview_fcoe_network_info
- oneview_logical_interconnect_group:
- redirect: community.general.remote_management.oneview.oneview_logical_interconnect_group
oneview_logical_interconnect_group_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.oneview_logical_interconnect_group_info
instead.
- oneview_logical_interconnect_group_info:
- redirect: community.general.remote_management.oneview.oneview_logical_interconnect_group_info
- oneview_network_set:
- redirect: community.general.remote_management.oneview.oneview_network_set
oneview_network_set_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.oneview_network_set_info instead.
- oneview_network_set_info:
- redirect: community.general.remote_management.oneview.oneview_network_set_info
- oneview_san_manager:
- redirect: community.general.remote_management.oneview.oneview_san_manager
oneview_san_manager_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.oneview_san_manager_info instead.
- oneview_san_manager_info:
- redirect: community.general.remote_management.oneview.oneview_san_manager_info
online_server_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.online_server_info instead.
- online_server_info:
- redirect: community.general.cloud.online.online_server_info
online_user_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.online_user_info instead.
- online_user_info:
- redirect: community.general.cloud.online.online_user_info
- open_iscsi:
- redirect: community.general.system.open_iscsi
- openbsd_pkg:
- redirect: community.general.packaging.os.openbsd_pkg
- opendj_backendprop:
- redirect: community.general.identity.opendj.opendj_backendprop
- openwrt_init:
- redirect: community.general.system.openwrt_init
- opkg:
- redirect: community.general.packaging.os.opkg
- osx_defaults:
- redirect: community.general.system.osx_defaults
- ovh_ip_failover:
- redirect: community.general.cloud.ovh.ovh_ip_failover
- ovh_ip_loadbalancing_backend:
- redirect: community.general.cloud.ovh.ovh_ip_loadbalancing_backend
- ovh_monthly_billing:
- redirect: community.general.cloud.ovh.ovh_monthly_billing
ovirt:
tombstone:
removal_version: 3.0.0
@@ -1089,64 +638,6 @@ plugin_routing:
tombstone:
removal_version: 3.0.0
warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead.
- pacemaker_cluster:
- redirect: community.general.clustering.pacemaker_cluster
- packet_device:
- redirect: community.general.cloud.packet.packet_device
- packet_ip_subnet:
- redirect: community.general.cloud.packet.packet_ip_subnet
- packet_project:
- redirect: community.general.cloud.packet.packet_project
- packet_sshkey:
- redirect: community.general.cloud.packet.packet_sshkey
- packet_volume:
- redirect: community.general.cloud.packet.packet_volume
- packet_volume_attachment:
- redirect: community.general.cloud.packet.packet_volume_attachment
- pacman:
- redirect: community.general.packaging.os.pacman
- pacman_key:
- redirect: community.general.packaging.os.pacman_key
- pagerduty:
- redirect: community.general.monitoring.pagerduty
- pagerduty_alert:
- redirect: community.general.monitoring.pagerduty_alert
- pagerduty_change:
- redirect: community.general.monitoring.pagerduty_change
- pagerduty_user:
- redirect: community.general.monitoring.pagerduty_user
- pam_limits:
- redirect: community.general.system.pam_limits
- pamd:
- redirect: community.general.system.pamd
- parted:
- redirect: community.general.system.parted
- pear:
- redirect: community.general.packaging.language.pear
- pids:
- redirect: community.general.system.pids
- pingdom:
- redirect: community.general.monitoring.pingdom
- pip_package_info:
- redirect: community.general.packaging.language.pip_package_info
- pipx:
- redirect: community.general.packaging.language.pipx
- pkg5:
- redirect: community.general.packaging.os.pkg5
- pkg5_publisher:
- redirect: community.general.packaging.os.pkg5_publisher
- pkgin:
- redirect: community.general.packaging.os.pkgin
- pkgng:
- redirect: community.general.packaging.os.pkgng
- pkgutil:
- redirect: community.general.packaging.os.pkgutil
- pmem:
- redirect: community.general.storage.pmem.pmem
- portage:
- redirect: community.general.packaging.os.portage
- portinstall:
- redirect: community.general.packaging.os.portinstall
postgresql_copy:
redirect: community.postgresql.postgresql_copy
postgresql_db:
@@ -1191,50 +682,116 @@ plugin_routing:
redirect: community.postgresql.postgresql_user
postgresql_user_obj_stat_info:
redirect: community.postgresql.postgresql_user_obj_stat_info
- pritunl_org:
- redirect: community.general.net_tools.pritunl.pritunl_org
- pritunl_org_info:
- redirect: community.general.net_tools.pritunl.pritunl_org_info
- pritunl_user:
- redirect: community.general.net_tools.pritunl.pritunl_user
- pritunl_user_info:
- redirect: community.general.net_tools.pritunl.pritunl_user_info
profitbricks:
- redirect: community.general.cloud.profitbricks.profitbricks
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Supporting library is unsupported since 2021.
profitbricks_datacenter:
- redirect: community.general.cloud.profitbricks.profitbricks_datacenter
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Supporting library is unsupported since 2021.
profitbricks_nic:
- redirect: community.general.cloud.profitbricks.profitbricks_nic
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Supporting library is unsupported since 2021.
profitbricks_volume:
- redirect: community.general.cloud.profitbricks.profitbricks_volume
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Supporting library is unsupported since 2021.
profitbricks_volume_attachments:
- redirect: community.general.cloud.profitbricks.profitbricks_volume_attachments
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: Supporting library is unsupported since 2021.
proxmox:
- redirect: community.general.cloud.misc.proxmox
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_backup:
+ redirect: community.proxmox.proxmox_backup
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_backup_info:
+ redirect: community.proxmox.proxmox_backup_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_disk:
+ redirect: community.proxmox.proxmox_disk
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
proxmox_domain_info:
- redirect: community.general.cloud.misc.proxmox_domain_info
+ redirect: community.proxmox.proxmox_domain_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
proxmox_group_info:
- redirect: community.general.cloud.misc.proxmox_group_info
+ redirect: community.proxmox.proxmox_group_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
proxmox_kvm:
- redirect: community.general.cloud.misc.proxmox_kvm
+ redirect: community.proxmox.proxmox_kvm
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
proxmox_nic:
- redirect: community.general.cloud.misc.proxmox_nic
+ redirect: community.proxmox.proxmox_nic
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_node_info:
+ redirect: community.proxmox.proxmox_node_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_pool:
+ redirect: community.proxmox.proxmox_pool
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_pool_member:
+ redirect: community.proxmox.proxmox_pool_member
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
proxmox_snap:
- redirect: community.general.cloud.misc.proxmox_snap
+ redirect: community.proxmox.proxmox_snap
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_storage_contents_info:
+ redirect: community.proxmox.proxmox_storage_contents_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
proxmox_storage_info:
- redirect: community.general.cloud.misc.proxmox_storage_info
+ redirect: community.proxmox.proxmox_storage_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
proxmox_tasks_info:
- redirect: community.general.cloud.misc.proxmox_tasks_info
+ redirect: community.proxmox.proxmox_tasks_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
proxmox_template:
- redirect: community.general.cloud.misc.proxmox_template
+ redirect: community.proxmox.proxmox_template
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
proxmox_user_info:
- redirect: community.general.cloud.misc.proxmox_user_info
- pubnub_blocks:
- redirect: community.general.cloud.pubnub.pubnub_blocks
- pulp_repo:
- redirect: community.general.packaging.os.pulp_repo
- puppet:
- redirect: community.general.system.puppet
+ redirect: community.proxmox.proxmox_user_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_vm_info:
+ redirect: community.proxmox.proxmox_vm_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
purefa_facts:
tombstone:
removal_version: 3.0.0
@@ -1243,212 +800,178 @@ plugin_routing:
tombstone:
removal_version: 3.0.0
warning_text: Use purestorage.flashblade.purefb_info instead.
- pushbullet:
- redirect: community.general.notification.pushbullet
- pushover:
- redirect: community.general.notification.pushover
python_requirements_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.python_requirements_info instead.
- python_requirements_info:
- redirect: community.general.system.python_requirements_info
- rax:
- redirect: community.general.cloud.rackspace.rax
- rax_cbs:
- redirect: community.general.cloud.rackspace.rax_cbs
rax_cbs_attachments:
- redirect: community.general.cloud.rackspace.rax_cbs_attachments
- rax_cdb:
- redirect: community.general.cloud.rackspace.rax_cdb
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_cbs:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_cdb_database:
- redirect: community.general.cloud.rackspace.rax_cdb_database
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_cdb_user:
- redirect: community.general.cloud.rackspace.rax_cdb_user
- rax_clb:
- redirect: community.general.cloud.rackspace.rax_clb
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_cdb:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_clb_nodes:
- redirect: community.general.cloud.rackspace.rax_clb_nodes
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_clb_ssl:
- redirect: community.general.cloud.rackspace.rax_clb_ssl
- rax_dns:
- redirect: community.general.cloud.rackspace.rax_dns
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_clb:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_dns_record:
- redirect: community.general.cloud.rackspace.rax_dns_record
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_dns:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_facts:
- redirect: community.general.cloud.rackspace.rax_facts
- rax_files:
- redirect: community.general.cloud.rackspace.rax_files
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_files_objects:
- redirect: community.general.cloud.rackspace.rax_files_objects
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_files:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_identity:
- redirect: community.general.cloud.rackspace.rax_identity
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_keypair:
- redirect: community.general.cloud.rackspace.rax_keypair
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_meta:
- redirect: community.general.cloud.rackspace.rax_meta
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_mon_alarm:
- redirect: community.general.cloud.rackspace.rax_mon_alarm
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_mon_check:
- redirect: community.general.cloud.rackspace.rax_mon_check
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_mon_entity:
- redirect: community.general.cloud.rackspace.rax_mon_entity
- rax_mon_notification:
- redirect: community.general.cloud.rackspace.rax_mon_notification
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_mon_notification_plan:
- redirect: community.general.cloud.rackspace.rax_mon_notification_plan
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
+ rax_mon_notification:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_network:
- redirect: community.general.cloud.rackspace.rax_network
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_queue:
- redirect: community.general.cloud.rackspace.rax_queue
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_scaling_group:
- redirect: community.general.cloud.rackspace.rax_scaling_group
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
rax_scaling_policy:
- redirect: community.general.cloud.rackspace.rax_scaling_policy
- read_csv:
- redirect: community.general.files.read_csv
- redfish_command:
- redirect: community.general.remote_management.redfish.redfish_command
- redfish_config:
- redirect: community.general.remote_management.redfish.redfish_config
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on the deprecated package pyrax.
redfish_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.redfish_info instead.
- redfish_info:
- redirect: community.general.remote_management.redfish.redfish_info
- redhat_subscription:
- redirect: community.general.packaging.os.redhat_subscription
- redis:
- redirect: community.general.database.misc.redis
- redis_data:
- redirect: community.general.database.misc.redis_data
- redis_data_incr:
- redirect: community.general.database.misc.redis_data_incr
- redis_data_info:
- redirect: community.general.database.misc.redis_data_info
- redis_info:
- redirect: community.general.database.misc.redis_info
- rhevm:
- redirect: community.general.cloud.misc.rhevm
rhn_channel:
- redirect: community.general.packaging.os.rhn_channel
+ tombstone:
+ removal_version: 10.0.0
+ warning_text: RHN is EOL.
rhn_register:
- redirect: community.general.packaging.os.rhn_register
- rhsm_release:
- redirect: community.general.packaging.os.rhsm_release
- rhsm_repository:
- redirect: community.general.packaging.os.rhsm_repository
- riak:
- redirect: community.general.database.misc.riak
- rocketchat:
- redirect: community.general.notification.rocketchat
- rollbar_deployment:
- redirect: community.general.monitoring.rollbar_deployment
- rpm_ostree_pkg:
- redirect: community.general.packaging.os.rpm_ostree_pkg
- rundeck_acl_policy:
- redirect: community.general.web_infrastructure.rundeck_acl_policy
- rundeck_job_executions_info:
- redirect: community.general.web_infrastructure.rundeck_job_executions_info
- rundeck_job_run:
- redirect: community.general.web_infrastructure.rundeck_job_run
- rundeck_project:
- redirect: community.general.web_infrastructure.rundeck_project
- runit:
- redirect: community.general.system.runit
- sap_task_list_execute:
- redirect: community.general.system.sap_task_list_execute
+ tombstone:
+ removal_version: 10.0.0
+ warning_text: RHN is EOL.
sapcar_extract:
- redirect: community.general.files.sapcar_extract
- say:
- redirect: community.general.notification.say
- scaleway_compute:
- redirect: community.general.cloud.scaleway.scaleway_compute
- scaleway_compute_private_network:
- redirect: community.general.cloud.scaleway.scaleway_compute_private_network
- scaleway_database_backup:
- redirect: community.general.cloud.scaleway.scaleway_database_backup
+ redirect: community.sap_libs.sapcar_extract
+ sap_task_list_execute:
+ redirect: community.sap_libs.sap_task_list_execute
scaleway_image_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.scaleway_image_info instead.
- scaleway_image_info:
- redirect: community.general.cloud.scaleway.scaleway_image_info
- scaleway_ip:
- redirect: community.general.cloud.scaleway.scaleway_ip
scaleway_ip_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.scaleway_ip_info instead.
- scaleway_ip_info:
- redirect: community.general.cloud.scaleway.scaleway_ip_info
- scaleway_lb:
- redirect: community.general.cloud.scaleway.scaleway_lb
scaleway_organization_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.scaleway_organization_info instead.
- scaleway_organization_info:
- redirect: community.general.cloud.scaleway.scaleway_organization_info
- scaleway_private_network:
- redirect: community.general.cloud.scaleway.scaleway_private_network
- scaleway_security_group:
- redirect: community.general.cloud.scaleway.scaleway_security_group
scaleway_security_group_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.scaleway_security_group_info instead.
- scaleway_security_group_info:
- redirect: community.general.cloud.scaleway.scaleway_security_group_info
- scaleway_security_group_rule:
- redirect: community.general.cloud.scaleway.scaleway_security_group_rule
scaleway_server_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.scaleway_server_info instead.
- scaleway_server_info:
- redirect: community.general.cloud.scaleway.scaleway_server_info
scaleway_snapshot_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.scaleway_snapshot_info instead.
- scaleway_snapshot_info:
- redirect: community.general.cloud.scaleway.scaleway_snapshot_info
- scaleway_sshkey:
- redirect: community.general.cloud.scaleway.scaleway_sshkey
- scaleway_user_data:
- redirect: community.general.cloud.scaleway.scaleway_user_data
- scaleway_volume:
- redirect: community.general.cloud.scaleway.scaleway_volume
scaleway_volume_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.scaleway_volume_info instead.
- scaleway_volume_info:
- redirect: community.general.cloud.scaleway.scaleway_volume_info
- sefcontext:
- redirect: community.general.system.sefcontext
- selinux_permissive:
- redirect: community.general.system.selinux_permissive
- selogin:
- redirect: community.general.system.selogin
- sendgrid:
- redirect: community.general.notification.sendgrid
sensu_check:
- redirect: community.general.monitoring.sensu.sensu_check
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
sensu_client:
- redirect: community.general.monitoring.sensu.sensu_client
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
sensu_handler:
- redirect: community.general.monitoring.sensu.sensu_handler
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
sensu_silence:
- redirect: community.general.monitoring.sensu.sensu_silence
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
sensu_subscription:
- redirect: community.general.monitoring.sensu.sensu_subscription
- seport:
- redirect: community.general.system.seport
- serverless:
- redirect: community.general.cloud.misc.serverless
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
sf_account_manager:
tombstone:
removal_version: 2.0.0
@@ -1469,204 +992,53 @@ plugin_routing:
tombstone:
removal_version: 2.0.0
warning_text: Use netapp.elementsw.na_elementsw_volume instead.
- shutdown:
- redirect: community.general.system.shutdown
- sl_vm:
- redirect: community.general.cloud.softlayer.sl_vm
- slack:
- redirect: community.general.notification.slack
- slackpkg:
- redirect: community.general.packaging.os.slackpkg
smartos_image_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.smartos_image_info instead.
- smartos_image_info:
- redirect: community.general.cloud.smartos.smartos_image_info
- snap:
- redirect: community.general.packaging.os.snap
- snap_alias:
- redirect: community.general.packaging.os.snap_alias
- snmp_facts:
- redirect: community.general.net_tools.snmp_facts
- solaris_zone:
- redirect: community.general.system.solaris_zone
- sorcery:
- redirect: community.general.packaging.os.sorcery
- spectrum_device:
- redirect: community.general.monitoring.spectrum_device
- spectrum_model_attrs:
- redirect: community.general.monitoring.spectrum_model_attrs
- spotinst_aws_elastigroup:
- redirect: community.general.cloud.spotinst.spotinst_aws_elastigroup
- ss_3par_cpg:
- redirect: community.general.storage.hpe3par.ss_3par_cpg
- ssh_config:
- redirect: community.general.system.ssh_config
stackdriver:
- redirect: community.general.monitoring.stackdriver
- stacki_host:
- redirect: community.general.remote_management.stacki.stacki_host
- statsd:
- redirect: community.general.monitoring.statsd
- statusio_maintenance:
- redirect: community.general.monitoring.statusio_maintenance
- sudoers:
- redirect: community.general.system.sudoers
- supervisorctl:
- redirect: community.general.web_infrastructure.supervisorctl
- svc:
- redirect: community.general.system.svc
- svr4pkg:
- redirect: community.general.packaging.os.svr4pkg
- swdepot:
- redirect: community.general.packaging.os.swdepot
- swupd:
- redirect: community.general.packaging.os.swupd
- syslogger:
- redirect: community.general.notification.syslogger
- syspatch:
- redirect: community.general.system.syspatch
- sysrc:
- redirect: community.general.system.sysrc
- sysupgrade:
- redirect: community.general.system.sysupgrade
- taiga_issue:
- redirect: community.general.web_infrastructure.taiga_issue
- telegram:
- redirect: community.general.notification.telegram
- terraform:
- redirect: community.general.cloud.misc.terraform
- timezone:
- redirect: community.general.system.timezone
- twilio:
- redirect: community.general.notification.twilio
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on HTTPS APIs that do not exist anymore,
+ and any new development in the direction of providing an alternative should
+ happen in the context of the google.cloud collection.
typetalk:
- redirect: community.general.notification.typetalk
- udm_dns_record:
- redirect: community.general.cloud.univention.udm_dns_record
- udm_dns_zone:
- redirect: community.general.cloud.univention.udm_dns_zone
- udm_group:
- redirect: community.general.cloud.univention.udm_group
- udm_share:
- redirect: community.general.cloud.univention.udm_share
- udm_user:
- redirect: community.general.cloud.univention.udm_user
- ufw:
- redirect: community.general.system.ufw
- uptimerobot:
- redirect: community.general.monitoring.uptimerobot
- urpmi:
- redirect: community.general.packaging.os.urpmi
- utm_aaa_group:
- redirect: community.general.web_infrastructure.sophos_utm.utm_aaa_group
- utm_aaa_group_info:
- redirect: community.general.web_infrastructure.sophos_utm.utm_aaa_group_info
- utm_ca_host_key_cert:
- redirect: community.general.web_infrastructure.sophos_utm.utm_ca_host_key_cert
- utm_ca_host_key_cert_info:
- redirect: community.general.web_infrastructure.sophos_utm.utm_ca_host_key_cert_info
- utm_dns_host:
- redirect: community.general.web_infrastructure.sophos_utm.utm_dns_host
- utm_network_interface_address:
- redirect: community.general.web_infrastructure.sophos_utm.utm_network_interface_address
- utm_network_interface_address_info:
- redirect: community.general.web_infrastructure.sophos_utm.utm_network_interface_address_info
- utm_proxy_auth_profile:
- redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_auth_profile
- utm_proxy_exception:
- redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_exception
- utm_proxy_frontend:
- redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_frontend
- utm_proxy_frontend_info:
- redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_frontend_info
- utm_proxy_location:
- redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_location
- utm_proxy_location_info:
- redirect: community.general.web_infrastructure.sophos_utm.utm_proxy_location_info
- vdo:
- redirect: community.general.system.vdo
- vertica_configuration:
- redirect: community.general.database.vertica.vertica_configuration
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: The typetalk service will be discontinued on Dec 2025.
vertica_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.vertica_info instead.
- vertica_info:
- redirect: community.general.database.vertica.vertica_info
- vertica_role:
- redirect: community.general.database.vertica.vertica_role
- vertica_schema:
- redirect: community.general.database.vertica.vertica_schema
- vertica_user:
- redirect: community.general.database.vertica.vertica_user
- vexata_eg:
- redirect: community.general.storage.vexata.vexata_eg
- vexata_volume:
- redirect: community.general.storage.vexata.vexata_volume
- vmadm:
- redirect: community.general.cloud.smartos.vmadm
- wakeonlan:
- redirect: community.general.remote_management.wakeonlan
- wdc_redfish_command:
- redirect: community.general.remote_management.redfish.wdc_redfish_command
- wdc_redfish_info:
- redirect: community.general.remote_management.redfish.wdc_redfish_info
webfaction_app:
- redirect: community.general.cloud.webfaction.webfaction_app
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on HTTPS APIs that do not exist anymore and
+ there is no clear path to update.
webfaction_db:
- redirect: community.general.cloud.webfaction.webfaction_db
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on HTTPS APIs that do not exist anymore and
+ there is no clear path to update.
webfaction_domain:
- redirect: community.general.cloud.webfaction.webfaction_domain
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on HTTPS APIs that do not exist anymore and
+ there is no clear path to update.
webfaction_mailbox:
- redirect: community.general.cloud.webfaction.webfaction_mailbox
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on HTTPS APIs that do not exist anymore and
+ there is no clear path to update.
webfaction_site:
- redirect: community.general.cloud.webfaction.webfaction_site
- xattr:
- redirect: community.general.files.xattr
- xbps:
- redirect: community.general.packaging.os.xbps
- xcc_redfish_command:
- redirect: community.general.remote_management.lenovoxcc.xcc_redfish_command
- xenserver_facts:
- redirect: community.general.cloud.misc.xenserver_facts
- xenserver_guest:
- redirect: community.general.cloud.xenserver.xenserver_guest
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module relied on HTTPS APIs that do not exist anymore and
+ there is no clear path to update.
xenserver_guest_facts:
tombstone:
removal_version: 3.0.0
warning_text: Use community.general.xenserver_guest_info instead.
- xenserver_guest_info:
- redirect: community.general.cloud.xenserver.xenserver_guest_info
- xenserver_guest_powerstate:
- redirect: community.general.cloud.xenserver.xenserver_guest_powerstate
- xfconf:
- redirect: community.general.system.xfconf
- xfconf_info:
- redirect: community.general.system.xfconf_info
- xfs_quota:
- redirect: community.general.system.xfs_quota
- xml:
- redirect: community.general.files.xml
- yarn:
- redirect: community.general.packaging.language.yarn
- yum_versionlock:
- redirect: community.general.packaging.os.yum_versionlock
- zfs:
- redirect: community.general.storage.zfs.zfs
- zfs_delegate_admin:
- redirect: community.general.storage.zfs.zfs_delegate_admin
- zfs_facts:
- redirect: community.general.storage.zfs.zfs_facts
- znode:
- redirect: community.general.clustering.znode
- zpool_facts:
- redirect: community.general.storage.zfs.zpool_facts
- zypper:
- redirect: community.general.packaging.os.zypper
- zypper_repository:
- redirect: community.general.packaging.os.zypper_repository
doc_fragments:
_gcp:
redirect: community.google._gcp
@@ -1680,8 +1052,46 @@ plugin_routing:
redirect: community.kubevirt.kubevirt_vm_options
nios:
redirect: infoblox.nios_modules.nios
+ oracle:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
+ oracle_creatable_resource:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
+ oracle_display_name_option:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
+ oracle_name_option:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
+ oracle_tags:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
+ oracle_wait_options:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
postgresql:
redirect: community.postgresql.postgresql
+ proxmox:
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ purestorage:
+ tombstone:
+ removal_version: 12.0.0
+ warning_text: The modules for purestorage were removed in community.general 3.0.0, this document fragment was left behind.
+ rackspace:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This doc fragment was used by rax modules, that relied on the deprecated
+ package pyrax.
module_utils:
docker.common:
redirect: community.docker.common
@@ -1699,37 +1109,45 @@ plugin_routing:
redirect: community.kubevirt.kubevirt
net_tools.nios.api:
redirect: infoblox.nios_modules.api
+ oci_utils:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: Code is unmaintained here and official Oracle collection is available for a number of years.
postgresql:
redirect: community.postgresql.postgresql
+ proxmox:
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ pure:
+ tombstone:
+ removal_version: 12.0.0
+ warning_text: The modules for purestorage were removed in community.general 3.0.0, this module util was left behind.
+ rax:
+ tombstone:
+ removal_version: 9.0.0
+ warning_text: This module util relied on the deprecated package pyrax.
remote_management.dellemc.dellemc_idrac:
redirect: dellemc.openmanage.dellemc_idrac
remote_management.dellemc.ome:
redirect: dellemc.openmanage.ome
- callback:
- actionable:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use the 'default' callback plugin with 'display_skipped_hosts
- = no' and 'display_ok_hosts = no' options.
- full_skip:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use the 'default' callback plugin with 'display_skipped_hosts
- = no' option.
- osx_say:
- redirect: community.general.say
- stderr:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use the 'default' callback plugin with 'display_failed_stderr
- = yes' option.
inventory:
docker_machine:
redirect: community.docker.docker_machine
docker_swarm:
redirect: community.docker.docker_swarm
+ proxmox:
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
kubevirt:
redirect: community.kubevirt.kubevirt
+ stackpath_compute:
+ tombstone:
+ removal_version: 11.0.0
+ warning_text: The company and the service were sunset in June 2024.
filter:
path_join:
# The ansible.builtin.path_join filter has been added in ansible-base 2.10.
@@ -1740,8 +1158,3 @@ plugin_routing:
# for Ansible 2.9 or earlier. Now we only will have the redirect until we
# eventually will deprecate and then remove it.
redirect: ansible.builtin.path_join
- action:
- iptables_state:
- redirect: community.general.system.iptables_state
- shutdown:
- redirect: community.general.system.shutdown
diff --git a/noxfile.py b/noxfile.py
new file mode 100644
index 0000000000..9b2f92a9e1
--- /dev/null
+++ b/noxfile.py
@@ -0,0 +1,38 @@
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+# SPDX-FileCopyrightText: 2025 Felix Fontein
+
+# /// script
+# dependencies = ["nox>=2025.02.09", "antsibull-nox"]
+# ///
+
+import sys
+
+import nox
+
+
+try:
+ import antsibull_nox
+except ImportError:
+ print("You need to install antsibull-nox in the same Python environment as nox.")
+ sys.exit(1)
+
+
+antsibull_nox.load_antsibull_nox_toml()
+
+
+@nox.session(name="aliases", python=False, default=True)
+def aliases(session: nox.Session) -> None:
+ session.run("python", "tests/sanity/extra/aliases.py")
+
+
+@nox.session(name="botmeta", default=True)
+def botmeta(session: nox.Session) -> None:
+ session.install("PyYAML", "voluptuous")
+ session.run("python", "tests/sanity/extra/botmeta.py")
+
+
+# Allow to run the noxfile with `python noxfile.py`, `pipx run noxfile.py`, or similar.
+# Requires nox >= 2025.02.09
+if __name__ == "__main__":
+ nox.main()
diff --git a/plugins/action/system/iptables_state.py b/plugins/action/iptables_state.py
similarity index 75%
rename from plugins/action/system/iptables_state.py
rename to plugins/action/iptables_state.py
index f59a7298b6..dd6724476f 100644
--- a/plugins/action/system/iptables_state.py
+++ b/plugins/action/iptables_state.py
@@ -1,10 +1,8 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2020, quidame
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
import time
@@ -22,29 +20,37 @@ class ActionModule(ActionBase):
_VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait'))
DEFAULT_SUDOABLE = True
- MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = (
- "This module doesn't support async>0 and poll>0 when its 'state' param "
- "is set to 'restored'. To enable its rollback feature (that needs the "
- "module to run asynchronously on the remote), please set task attribute "
- "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
- "'ansible_timeout' (=%s) (recommended).")
- MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = (
- "Attempts to restore iptables state without rollback in case of mistake "
- "may lead the ansible controller to loose access to the hosts and never "
- "regain it before fixing firewall rules through a serial console, or any "
- "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and "
- "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) "
- "(recommended).")
- MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = (
- "You attempt to restore iptables state with rollback in case of mistake, "
- "but with settings that will lead this rollback to happen AFTER that the "
- "controller will reach its own timeout. Please set task attribute 'poll' "
- "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
- "'ansible_timeout' (=%s) (recommended).")
+ @staticmethod
+ def msg_error__async_and_poll_not_zero(task_poll, task_async, max_timeout):
+ return (
+ "This module doesn't support async>0 and poll>0 when its 'state' param "
+ "is set to 'restored'. To enable its rollback feature (that needs the "
+ "module to run asynchronously on the remote), please set task attribute "
+ f"'poll' (={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than "
+ f"'ansible_timeout' (={max_timeout}) (recommended).")
+
+ @staticmethod
+ def msg_warning__no_async_is_no_rollback(task_poll, task_async, max_timeout):
+ return (
+ "Attempts to restore iptables state without rollback in case of mistake "
+ "may lead the ansible controller to lose access to the hosts and never "
+ "regain it before fixing firewall rules through a serial console, or any "
+ f"other way except SSH. Please set task attribute 'poll' (={task_poll}) to 0, and "
+ f"'async' (={task_async}) to a value >2 and not greater than 'ansible_timeout' (={max_timeout}) "
+ "(recommended).")
+
+ @staticmethod
+ def msg_warning__async_greater_than_timeout(task_poll, task_async, max_timeout):
+ return (
+ "You attempt to restore iptables state with rollback in case of mistake, "
+ "but with settings that will lead this rollback to happen AFTER that the "
+ "controller will reach its own timeout. Please set task attribute 'poll' "
+ f"(={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than "
+ f"'ansible_timeout' (={max_timeout}) (recommended).")
def _async_result(self, async_status_args, task_vars, timeout):
'''
- Retrieve results of the asynchonous task, and display them in place of
+ Retrieve results of the asynchronous task, and display them in place of
the async wrapper results (those with the ansible_job_id key).
'''
async_status = self._task.copy()
@@ -88,21 +94,25 @@ class ActionModule(ActionBase):
max_timeout = self._connection._play_context.timeout
module_args = self._task.args
+ async_status_args = {}
+ starter_cmd = None
+ confirm_cmd = None
+
if module_args.get('state', None) == 'restored':
if not wrap_async:
if not check_mode:
- display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % (
+ display.warning(self.msg_warning__no_async_is_no_rollback(
task_poll,
task_async,
max_timeout))
elif task_poll:
- raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % (
+ raise AnsibleActionFail(self.msg_error__async_and_poll_not_zero(
task_poll,
task_async,
max_timeout))
else:
if task_async > max_timeout and not check_mode:
- display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % (
+ display.warning(self.msg_warning__async_greater_than_timeout(
task_poll,
task_async,
max_timeout))
@@ -115,10 +125,10 @@ class ActionModule(ActionBase):
# remote and local sides (if not the same, make the loop
# longer on the controller); and set a backup file path.
module_args['_timeout'] = task_async
- module_args['_back'] = '%s/iptables.state' % async_dir
+ module_args['_back'] = f'{async_dir}/iptables.state'
async_status_args = dict(mode='status')
- confirm_cmd = 'rm -f %s' % module_args['_back']
- starter_cmd = 'touch %s.starter' % module_args['_back']
+ confirm_cmd = f"rm -f {module_args['_back']}"
+ starter_cmd = f"touch {module_args['_back']}.starter"
remaining_time = max(task_async, max_timeout)
# do work!
diff --git a/plugins/action/system/shutdown.py b/plugins/action/shutdown.py
similarity index 64%
rename from plugins/action/system/shutdown.py
rename to plugins/action/shutdown.py
index c2860f1d6f..d2a9d3c2b7 100644
--- a/plugins/action/system/shutdown.py
+++ b/plugins/action/shutdown.py
@@ -1,12 +1,11 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2020, Amin Vakil
# Copyright (c) 2016-2018, Matt Davis
# Copyright (c) 2018, Sam Doran
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_native, to_text
@@ -17,6 +16,10 @@ from ansible.utils.display import Display
display = Display()
+def fmt(mapping, key):
+ return to_native(mapping[key]).strip()
+
+
class TimedOutException(Exception):
pass
@@ -44,7 +47,7 @@ class ActionModule(ActionBase):
SHUTDOWN_COMMAND_ARGS = {
'alpine': '',
'void': '-h +{delay_min} "{message}"',
- 'freebsd': '-h +{delay_sec}s "{message}"',
+ 'freebsd': '-p +{delay_sec}s "{message}"',
'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
'macosx': '-h +{delay_min} "{message}"',
'openbsd': '-h +{delay_min} "{message}"',
@@ -80,35 +83,41 @@ class ActionModule(ActionBase):
getattr(self, default_value))))
return value
- def get_shutdown_command_args(self, distribution):
- args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
- # Convert seconds to minutes. If less that 60, set it to 0.
- delay_sec = self.delay
- shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
- return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
-
def get_distribution(self, task_vars):
# FIXME: only execute the module if we don't already have the facts we need
distribution = {}
- display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
+ display.debug(f'{self._task.action}: running setup module to get distribution')
module_output = self._execute_module(
task_vars=task_vars,
module_name='ansible.legacy.setup',
module_args={'gather_subset': 'min'})
try:
if module_output.get('failed', False):
- raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
- to_native(module_output['module_stdout']).strip(),
- to_native(module_output['module_stderr']).strip()))
+ raise AnsibleError(f"Failed to determine system distribution. {fmt(module_output, 'module_stdout')}, {fmt(module_output, 'module_stderr')}")
distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
- distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
+ distribution['version'] = to_text(
+ module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
- display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
+ display.debug(f"{self._task.action}: distribution: {distribution}")
return distribution
except KeyError as ke:
- raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
+ raise AnsibleError(f'Failed to get distribution information. Missing "{ke.args[0]}" in output.')
def get_shutdown_command(self, task_vars, distribution):
+ def find_command(command, find_search_paths):
+ display.debug(f'{self._task.action}: running find module looking in {find_search_paths} to get path for "{command}"')
+ find_result = self._execute_module(
+ task_vars=task_vars,
+ # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
+ module_name='ansible.legacy.find',
+ module_args={
+ 'paths': find_search_paths,
+ 'patterns': [command],
+ 'file_type': 'any'
+ }
+ )
+ return [x['path'] for x in find_result['files']]
+
shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
search_paths = self._task.args.get('search_paths', default_search_paths)
@@ -118,62 +127,61 @@ class ActionModule(ActionBase):
if is_string(search_paths):
search_paths = [search_paths]
- # Error if we didn't get a list
- err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
try:
incorrect_type = any(not is_string(x) for x in search_paths)
if not isinstance(search_paths, list) or incorrect_type:
raise TypeError
except TypeError:
- raise AnsibleError(err_msg.format(search_paths))
+ # Error if we didn't get a list
+ err_msg = f"'search_paths' must be a string or flat list of strings, got {search_paths}"
+ raise AnsibleError(err_msg)
- display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
- action=self._task.action,
- command=shutdown_bin,
- paths=search_paths))
- find_result = self._execute_module(
- task_vars=task_vars,
- # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
- module_name='ansible.legacy.find',
- module_args={
- 'paths': search_paths,
- 'patterns': [shutdown_bin],
- 'file_type': 'any'
- }
- )
+ full_path = find_command(shutdown_bin, search_paths) # find the path to the shutdown command
+ if not full_path: # if we could not find the shutdown command
- full_path = [x['path'] for x in find_result['files']]
- if not full_path:
- raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
- self._shutdown_command = full_path[0]
- return self._shutdown_command
+ # tell the user we will try with systemd
+ display.vvv(f'Unable to find command "{shutdown_bin}" in search paths: {search_paths}, will attempt a shutdown using systemd directly.')
+ systemctl_search_paths = ['/bin', '/usr/bin']
+ full_path = find_command('systemctl', systemctl_search_paths) # find the path to the systemctl command
+ if not full_path: # if we couldn't find systemctl
+ raise AnsibleError(
+ f'Could not find command "{shutdown_bin}" in search paths: {search_paths} or systemctl'
+ f' command in search paths: {systemctl_search_paths}, unable to shutdown.') # we give up here
+ else:
+ return f"{full_path[0]} poweroff" # done, since we cannot use args with systemd shutdown
+
+ # systemd case taken care of, here we add args to the command
+ args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
+ # Convert seconds to minutes. If less than 60, set it to 0.
+ delay_sec = self.delay
+ shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
+
+ af = args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
+ return f'{full_path[0]} {af}'
def perform_shutdown(self, task_vars, distribution):
result = {}
shutdown_result = {}
- shutdown_command = self.get_shutdown_command(task_vars, distribution)
- shutdown_command_args = self.get_shutdown_command_args(distribution)
- shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args)
+ shutdown_command_exec = self.get_shutdown_command(task_vars, distribution)
self.cleanup(force=True)
try:
- display.vvv("{action}: shutting down server...".format(action=self._task.action))
- display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec))
+ display.vvv(f"{self._task.action}: shutting down server...")
+ display.debug(f"{self._task.action}: shutting down server with command '{shutdown_command_exec}'")
if self._play_context.check_mode:
shutdown_result['rc'] = 0
else:
shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE)
except AnsibleConnectionFailure as e:
# If the connection is closed too quickly due to the system being shutdown, carry on
- display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
+ display.debug(
+ f'{self._task.action}: AnsibleConnectionFailure caught and handled: {e}')
shutdown_result['rc'] = 0
if shutdown_result['rc'] != 0:
result['failed'] = True
result['shutdown'] = False
- result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format(
- stdout=to_native(shutdown_result['stdout'].strip()),
- stderr=to_native(shutdown_result['stderr'].strip()))
+ result['msg'] = f"Shutdown command failed. Error was {fmt(shutdown_result, 'stdout')}, {fmt(shutdown_result, 'stderr')}"
return result
result['failed'] = False
@@ -186,7 +194,7 @@ class ActionModule(ActionBase):
# If running with local connection, fail so we don't shutdown ourself
if self._connection.transport == 'local' and (not self._play_context.check_mode):
- msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action)
+ msg = f'Running {self._task.action} with local connection would shutdown the control node.'
return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg}
if task_vars is None:
diff --git a/plugins/become/doas.py b/plugins/become/doas.py
index d282e96851..84efe31ac4 100644
--- a/plugins/become/doas.py
+++ b/plugins/become/doas.py
@@ -1,84 +1,91 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: doas
- short_description: Do As user
+DOCUMENTATION = r"""
+name: doas
+short_description: Do As user
+description:
+ - This become plugin allows your remote/login user to execute commands as another user using the C(doas) utility.
+author: Ansible Core Team
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: doas_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_doas_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_DOAS_USER
+ become_exe:
+ description: C(doas) executable.
+ type: string
+ default: doas
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: doas_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_doas_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_DOAS_EXE
+ become_flags:
+ description: Options to pass to C(doas).
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: doas_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_doas_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_DOAS_FLAGS
+ become_pass:
+ description: Password for C(doas) prompt.
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_doas_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_DOAS_PASS
+ ini:
+ - section: doas_become_plugin
+ key: password
+ prompt_l10n:
description:
- - This become plugins allows your remote/login user to execute commands as another user via the doas utility.
- author: Ansible Core Team
- options:
- become_user:
- description: User you 'become' to execute the task
- ini:
- - section: privilege_escalation
- key: become_user
- - section: doas_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_doas_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_DOAS_USER
- become_exe:
- description: Doas executable
- default: doas
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: doas_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_doas_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_DOAS_EXE
- become_flags:
- description: Options to pass to doas
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: doas_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_doas_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_DOAS_FLAGS
- become_pass:
- description: password for doas prompt
- required: False
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_doas_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_DOAS_PASS
- ini:
- - section: doas_become_plugin
- key: password
- prompt_l10n:
- description:
- - List of localized strings to match for prompt detection
- - If empty we'll use the built in one
- default: []
- ini:
- - section: doas_become_plugin
- key: localized_prompts
- vars:
- - name: ansible_doas_prompt_l10n
- env:
- - name: ANSIBLE_DOAS_PROMPT_L10N
-'''
+ - List of localized strings to match for prompt detection.
+ - If empty the plugin uses the built-in one.
+ type: list
+ elements: string
+ default: []
+ ini:
+ - section: doas_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_doas_prompt_l10n
+ env:
+ - name: ANSIBLE_DOAS_PROMPT_L10N
+notes:
+ - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically
+ disables pipelining. On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user.
+"""
import re
@@ -94,6 +101,10 @@ class BecomeModule(BecomeBase):
fail = ('Permission denied',)
missing = ('Authorization required',)
+ # See https://github.com/ansible-collections/community.general/issues/9977,
+ # https://github.com/ansible/ansible/pull/78111
+ pipelining = False
+
def check_password_prompt(self, b_output):
''' checks if the expected password prompt exists in b_output '''
@@ -119,9 +130,9 @@ class BecomeModule(BecomeBase):
flags += ' -n'
become_user = self.get_option('become_user')
- user = '-u %s' % (become_user) if become_user else ''
+ user = f'-u {become_user}' if become_user else ''
success_cmd = self._build_success_command(cmd, shell, noexe=True)
executable = getattr(shell, 'executable', shell.SHELL_FAMILY)
- return '%s %s %s %s -c %s' % (become_exe, flags, user, executable, success_cmd)
+ return f'{become_exe} {flags} {user} {executable} -c {success_cmd}'
diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py
index b3c34f377c..dad05eb34e 100644
--- a/plugins/become/dzdo.py
+++ b/plugins/become/dzdo.py
@@ -1,72 +1,74 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: dzdo
- short_description: Centrify's Direct Authorize
- description:
- - This become plugins allows your remote/login user to execute commands as another user via the dzdo utility.
- author: Ansible Core Team
- options:
- become_user:
- description: User you 'become' to execute the task
- ini:
- - section: privilege_escalation
- key: become_user
- - section: dzdo_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_dzdo_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_DZDO_USER
- become_exe:
- description: Dzdo executable
- default: dzdo
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: dzdo_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_dzdo_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_DZDO_EXE
- become_flags:
- description: Options to pass to dzdo
- default: -H -S -n
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: dzdo_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_dzdo_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_DZDO_FLAGS
- become_pass:
- description: Options to pass to dzdo
- required: False
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_dzdo_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_DZDO_PASS
- ini:
- - section: dzdo_become_plugin
- key: password
-'''
+DOCUMENTATION = r"""
+name: dzdo
+short_description: Centrify's Direct Authorize
+description:
+ - This become plugin allows your remote/login user to execute commands as another user using the C(dzdo) utility.
+author: Ansible Core Team
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: dzdo_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_dzdo_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_DZDO_USER
+ become_exe:
+ description: C(dzdo) executable.
+ type: string
+ default: dzdo
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: dzdo_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_dzdo_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_DZDO_EXE
+ become_flags:
+ description: Options to pass to C(dzdo).
+ type: string
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: dzdo_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_dzdo_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_DZDO_FLAGS
+ become_pass:
+ description: Options to pass to C(dzdo).
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_dzdo_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_DZDO_PASS
+ ini:
+ - section: dzdo_become_plugin
+ key: password
+"""
from ansible.plugins.become import BecomeBase
@@ -88,10 +90,10 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
if self.get_option('become_pass'):
- self.prompt = '[dzdo via ansible, key=%s] password:' % self._id
- flags = '%s -p "%s"' % (flags.replace('-n', ''), self.prompt)
+ self.prompt = f'[dzdo via ansible, key={self._id}] password:'
+ flags = f"{flags.replace('-n', '')} -p \"{self.prompt}\""
become_user = self.get_option('become_user')
- user = '-u %s' % (become_user) if become_user else ''
+ user = f'-u {become_user}' if become_user else ''
- return ' '.join([becomecmd, flags, user, self._build_success_command(cmd, shell)])
+ return f"{becomecmd} {flags} {user} {self._build_success_command(cmd, shell)}"
diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py
index 29731d95d5..0ffba62385 100644
--- a/plugins/become/ksu.py
+++ b/plugins/become/ksu.py
@@ -1,85 +1,89 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: ksu
- short_description: Kerberos substitute user
+DOCUMENTATION = r"""
+name: ksu
+short_description: Kerberos substitute user
+description:
+ - This become plugin allows your remote/login user to execute commands as another user using the C(ksu) utility.
+author: Ansible Core Team
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: ksu_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_ksu_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_KSU_USER
+ required: true
+ become_exe:
+ description: C(ksu) executable.
+ type: string
+ default: ksu
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: ksu_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_ksu_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_KSU_EXE
+ become_flags:
+ description: Options to pass to C(ksu).
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: ksu_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_ksu_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_KSU_FLAGS
+ become_pass:
+ description: C(ksu) password.
+ type: string
+ required: false
+ vars:
+ - name: ansible_ksu_pass
+ - name: ansible_become_pass
+ - name: ansible_become_password
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_KSU_PASS
+ ini:
+ - section: ksu_become_plugin
+ key: password
+ prompt_l10n:
description:
- - This become plugins allows your remote/login user to execute commands as another user via the ksu utility.
- author: Ansible Core Team
- options:
- become_user:
- description: User you 'become' to execute the task
- ini:
- - section: privilege_escalation
- key: become_user
- - section: ksu_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_ksu_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_KSU_USER
- required: True
- become_exe:
- description: Su executable
- default: ksu
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: ksu_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_ksu_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_KSU_EXE
- become_flags:
- description: Options to pass to ksu
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: ksu_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_ksu_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_KSU_FLAGS
- become_pass:
- description: ksu password
- required: False
- vars:
- - name: ansible_ksu_pass
- - name: ansible_become_pass
- - name: ansible_become_password
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_KSU_PASS
- ini:
- - section: ksu_become_plugin
- key: password
- prompt_l10n:
- description:
- - List of localized strings to match for prompt detection
- - If empty we'll use the built in one
- default: []
- ini:
- - section: ksu_become_plugin
- key: localized_prompts
- vars:
- - name: ansible_ksu_prompt_l10n
- env:
- - name: ANSIBLE_KSU_PROMPT_L10N
-'''
+ - List of localized strings to match for prompt detection.
+ - If empty the plugin uses the built-in one.
+ type: list
+ elements: string
+ default: []
+ ini:
+ - section: ksu_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_ksu_prompt_l10n
+ env:
+ - name: ANSIBLE_KSU_PROMPT_L10N
+"""
import re
@@ -118,4 +122,4 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
user = self.get_option('become_user')
- return '%s %s %s -e %s ' % (exe, user, flags, self._build_success_command(cmd, shell))
+ return f'{exe} {user} {flags} -e {self._build_success_command(cmd, shell)} '
diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py
index 4b533baba0..685f39f5d8 100644
--- a/plugins/become/machinectl.py
+++ b/plugins/become/machinectl.py
@@ -1,95 +1,99 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: machinectl
- short_description: Systemd's machinectl privilege escalation
- description:
- - This become plugins allows your remote/login user to execute commands as another user via the machinectl utility.
- author: Ansible Core Team
- options:
- become_user:
- description: User you 'become' to execute the task
- default: ''
- ini:
- - section: privilege_escalation
- key: become_user
- - section: machinectl_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_machinectl_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_MACHINECTL_USER
- become_exe:
- description: Machinectl executable
- default: machinectl
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: machinectl_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_machinectl_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_MACHINECTL_EXE
- become_flags:
- description: Options to pass to machinectl
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: machinectl_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_machinectl_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_MACHINECTL_FLAGS
- become_pass:
- description: Password for machinectl
- required: False
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_machinectl_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_MACHINECTL_PASS
- ini:
- - section: machinectl_become_plugin
- key: password
- notes:
- - When not using this plugin with user C(root), it only works correctly with a polkit rule which will alter
- the behaviour of machinectl. This rule must alter the prompt behaviour to ask directly for the user credentials,
- if the user is allowed to perform the action (take a look at the examples section).
- If such a rule is not present the plugin only work if it is used in context with the root user,
- because then no further prompt will be shown by machinectl.
-'''
+DOCUMENTATION = r"""
+name: machinectl
+short_description: Systemd's machinectl privilege escalation
+description:
+ - This become plugins allows your remote/login user to execute commands as another user using the C(machinectl) utility.
+author: Ansible Core Team
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: machinectl_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_machinectl_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_MACHINECTL_USER
+ become_exe:
+ description: C(machinectl) executable.
+ type: string
+ default: machinectl
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: machinectl_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_machinectl_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_MACHINECTL_EXE
+ become_flags:
+ description: Options to pass to C(machinectl).
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: machinectl_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_machinectl_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_MACHINECTL_FLAGS
+ become_pass:
+ description: Password for C(machinectl).
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_machinectl_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_MACHINECTL_PASS
+ ini:
+ - section: machinectl_become_plugin
+ key: password
+notes:
+ - When not using this plugin with user V(root), it only works correctly with a polkit rule which alters the behaviour
+ of C(machinectl). This rule must alter the prompt behaviour to ask directly for the user credentials, if the user is allowed
+ to perform the action (take a look at the examples section). If such a rule is not present the plugin only works if it
+ is used in context with the root user, because then no further prompt is shown by C(machinectl).
+ - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically
+ disables pipelining. On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user.
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# A polkit rule needed to use the module with a non-root user.
# See the Notes section for details.
-60-machinectl-fast-user-auth.rules: |
- polkit.addRule(function(action, subject) {
- if(action.id == "org.freedesktop.machine1.host-shell" && subject.isInGroup("wheel")) {
- return polkit.Result.AUTH_SELF_KEEP;
- }
- });
-'''
+/etc/polkit-1/rules.d/60-machinectl-fast-user-auth.rules: |-
+ polkit.addRule(function(action, subject) {
+ if(action.id == "org.freedesktop.machine1.host-shell" &&
+ subject.isInGroup("wheel")) {
+ return polkit.Result.AUTH_SELF_KEEP;
+ }
+ });
+"""
from re import compile as re_compile
from ansible.plugins.become import BecomeBase
-from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common.text.converters import to_bytes
ansi_color_codes = re_compile(to_bytes(r'\x1B\[[0-9;]+m'))
@@ -102,6 +106,11 @@ class BecomeModule(BecomeBase):
prompt = 'Password: '
fail = ('==== AUTHENTICATION FAILED ====',)
success = ('==== AUTHENTICATION COMPLETE ====',)
+ require_tty = True # see https://github.com/ansible-collections/community.general/issues/6932
+
+ # See https://github.com/ansible/ansible/issues/81254,
+ # https://github.com/ansible/ansible/pull/78111
+ pipelining = False
@staticmethod
def remove_ansi_codes(line):
@@ -117,7 +126,7 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
user = self.get_option('become_user')
- return '%s -q shell %s %s@ %s' % (become, flags, user, cmd)
+ return f'{become} -q shell {flags} {user}@ {self._build_success_command(cmd, shell)}'
def check_success(self, b_output):
b_output = self.remove_ansi_codes(b_output)
diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py
index 3645e95fec..c9eb975427 100644
--- a/plugins/become/pbrun.py
+++ b/plugins/become/pbrun.py
@@ -1,84 +1,86 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: pbrun
- short_description: PowerBroker run
- description:
- - This become plugins allows your remote/login user to execute commands as another user via the pbrun utility.
- author: Ansible Core Team
- options:
- become_user:
- description: User you 'become' to execute the task
- default: ''
- ini:
- - section: privilege_escalation
- key: become_user
- - section: pbrun_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_pbrun_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_PBRUN_USER
- become_exe:
- description: Sudo executable
- default: pbrun
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: pbrun_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_pbrun_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_PBRUN_EXE
- become_flags:
- description: Options to pass to pbrun
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: pbrun_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_pbrun_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_PBRUN_FLAGS
- become_pass:
- description: Password for pbrun
- required: False
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_pbrun_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_PBRUN_PASS
- ini:
- - section: pbrun_become_plugin
- key: password
- wrap_exe:
- description: Toggle to wrap the command pbrun calls in 'shell -c' or not
- default: False
- type: bool
- ini:
- - section: pbrun_become_plugin
- key: wrap_execution
- vars:
- - name: ansible_pbrun_wrap_execution
- env:
- - name: ANSIBLE_PBRUN_WRAP_EXECUTION
-'''
+DOCUMENTATION = r"""
+name: pbrun
+short_description: PowerBroker run
+description:
+ - This become plugins allows your remote/login user to execute commands as another user using the C(pbrun) utility.
+author: Ansible Core Team
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: pbrun_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_pbrun_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_PBRUN_USER
+ become_exe:
+ description: C(pbrun) executable.
+ type: string
+ default: pbrun
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pbrun_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pbrun_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PBRUN_EXE
+ become_flags:
+ description: Options to pass to C(pbrun).
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pbrun_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pbrun_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PBRUN_FLAGS
+ become_pass:
+ description: Password for C(pbrun).
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pbrun_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PBRUN_PASS
+ ini:
+ - section: pbrun_become_plugin
+ key: password
+ wrap_exe:
+ description: Toggle to wrap the command C(pbrun) calls in C(shell -c) or not.
+ default: false
+ type: bool
+ ini:
+ - section: pbrun_become_plugin
+ key: wrap_execution
+ vars:
+ - name: ansible_pbrun_wrap_execution
+ env:
+ - name: ANSIBLE_PBRUN_WRAP_EXECUTION
+"""
from ansible.plugins.become import BecomeBase
@@ -99,7 +101,7 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
become_user = self.get_option('become_user')
- user = '-u %s' % (become_user) if become_user else ''
+ user = f'-u {become_user}' if become_user else ''
noexe = not self.get_option('wrap_exe')
- return ' '.join([become_exe, flags, user, self._build_success_command(cmd, shell, noexe=noexe)])
+ return f"{become_exe} {flags} {user} {self._build_success_command(cmd, shell, noexe=noexe)}"
diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py
index f14c22e68f..2e7df0f6c0 100644
--- a/plugins/become/pfexec.py
+++ b/plugins/become/pfexec.py
@@ -1,89 +1,91 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: pfexec
- short_description: profile based execution
+DOCUMENTATION = r"""
+name: pfexec
+short_description: Profile based execution
+description:
+ - This become plugins allows your remote/login user to execute commands as another user using the C(pfexec) utility.
+author: Ansible Core Team
+options:
+ become_user:
description:
- - This become plugins allows your remote/login user to execute commands as another user via the pfexec utility.
- author: Ansible Core Team
- options:
- become_user:
- description:
- - User you 'become' to execute the task
- - This plugin ignores this setting as pfexec uses it's own C(exec_attr) to figure this out,
- but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions.
- default: root
- ini:
- - section: privilege_escalation
- key: become_user
- - section: pfexec_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_pfexec_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_PFEXEC_USER
- become_exe:
- description: Sudo executable
- default: pfexec
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: pfexec_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_pfexec_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_PFEXEC_EXE
- become_flags:
- description: Options to pass to pfexec
- default: -H -S -n
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: pfexec_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_pfexec_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_PFEXEC_FLAGS
- become_pass:
- description: pfexec password
- required: False
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_pfexec_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_PFEXEC_PASS
- ini:
- - section: pfexec_become_plugin
- key: password
- wrap_exe:
- description: Toggle to wrap the command pfexec calls in 'shell -c' or not
- default: False
- type: bool
- ini:
- - section: pfexec_become_plugin
- key: wrap_execution
- vars:
- - name: ansible_pfexec_wrap_execution
- env:
- - name: ANSIBLE_PFEXEC_WRAP_EXECUTION
- notes:
- - This plugin ignores I(become_user) as pfexec uses it's own C(exec_attr) to figure this out.
-'''
+ - User you 'become' to execute the task.
+ - This plugin ignores this setting as pfexec uses its own C(exec_attr) to figure this out, but it is supplied here for
+ Ansible to make decisions needed for the task execution, like file permissions.
+ type: string
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: pfexec_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_pfexec_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_PFEXEC_USER
+ become_exe:
+ description: C(pfexec) executable.
+ type: string
+ default: pfexec
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pfexec_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pfexec_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PFEXEC_EXE
+ become_flags:
+ description: Options to pass to C(pfexec).
+ type: string
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pfexec_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pfexec_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PFEXEC_FLAGS
+ become_pass:
+ description: C(pfexec) password.
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pfexec_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PFEXEC_PASS
+ ini:
+ - section: pfexec_become_plugin
+ key: password
+ wrap_exe:
+ description: Toggle to wrap the command C(pfexec) calls in C(shell -c) or not.
+ default: false
+ type: bool
+ ini:
+ - section: pfexec_become_plugin
+ key: wrap_execution
+ vars:
+ - name: ansible_pfexec_wrap_execution
+ env:
+ - name: ANSIBLE_PFEXEC_WRAP_EXECUTION
+notes:
+ - This plugin ignores O(become_user) as pfexec uses its own C(exec_attr) to figure this out.
+"""
from ansible.plugins.become import BecomeBase
@@ -102,4 +104,4 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
noexe = not self.get_option('wrap_exe')
- return '%s %s "%s"' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe))
+ return f'{exe} {flags} {self._build_success_command(cmd, shell, noexe=noexe)}'
diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py
index bb384aeedf..413600cdbf 100644
--- a/plugins/become/pmrun.py
+++ b/plugins/become/pmrun.py
@@ -1,64 +1,65 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: pmrun
- short_description: Privilege Manager run
- description:
- - This become plugins allows your remote/login user to execute commands as another user via the pmrun utility.
- author: Ansible Core Team
- options:
- become_exe:
- description: Sudo executable
- default: pmrun
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: pmrun_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_pmrun_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_PMRUN_EXE
- become_flags:
- description: Options to pass to pmrun
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: pmrun_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_pmrun_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_PMRUN_FLAGS
- become_pass:
- description: pmrun password
- required: False
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_pmrun_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_PMRUN_PASS
- ini:
- - section: pmrun_become_plugin
- key: password
- notes:
- - This plugin ignores the become_user supplied and uses pmrun's own configuration to select the user.
-'''
+DOCUMENTATION = r"""
+name: pmrun
+short_description: Privilege Manager run
+description:
+ - This become plugins allows your remote/login user to execute commands as another user using the C(pmrun) utility.
+author: Ansible Core Team
+options:
+ become_exe:
+ description: C(pmrun) executable.
+ type: string
+ default: pmrun
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pmrun_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pmrun_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PMRUN_EXE
+ become_flags:
+ description: Options to pass to C(pmrun).
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pmrun_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pmrun_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PMRUN_FLAGS
+ become_pass:
+ description: C(pmrun) password.
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pmrun_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PMRUN_PASS
+ ini:
+ - section: pmrun_become_plugin
+ key: password
+notes:
+ - This plugin ignores the C(become_user) supplied and uses C(pmrun)'s own configuration to select the user.
+"""
+from shlex import quote as shlex_quote
from ansible.plugins.become import BecomeBase
-from ansible.module_utils.six.moves import shlex_quote
class BecomeModule(BecomeBase):
@@ -75,4 +76,4 @@ class BecomeModule(BecomeBase):
become = self.get_option('become_exe')
flags = self.get_option('become_flags')
- return '%s %s %s' % (become, flags, shlex_quote(self._build_success_command(cmd, shell)))
+ return f'{become} {flags} {shlex_quote(self._build_success_command(cmd, shell))}'
diff --git a/plugins/become/run0.py b/plugins/become/run0.py
new file mode 100644
index 0000000000..4362d53ebf
--- /dev/null
+++ b/plugins/become/run0.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2024, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+name: run0
+short_description: Systemd's run0
+description:
+ - This become plugins allows your remote/login user to execute commands as another user using the C(run0) utility.
+author:
+ - Thomas Sjögren (@konstruktoid)
+version_added: '9.0.0'
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: run0_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_run0_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_RUN0_USER
+ type: string
+ become_exe:
+ description: C(run0) executable.
+ default: run0
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: run0_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_run0_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_RUN0_EXE
+ type: string
+ become_flags:
+ description: Options to pass to C(run0).
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: run0_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_run0_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_RUN0_FLAGS
+ type: string
+notes:
+ - This plugin only works when a C(polkit) rule is in place.
+"""
+
+EXAMPLES = r"""
+# An example polkit rule that allows the user 'ansible' in the 'wheel' group
+# to execute commands using run0 without authentication.
+/etc/polkit-1/rules.d/60-run0-fast-user-auth.rules: |-
+ polkit.addRule(function(action, subject) {
+ if(action.id == "org.freedesktop.systemd1.manage-units" &&
+ subject.isInGroup("wheel") &&
+ subject.user == "ansible") {
+ return polkit.Result.YES;
+ }
+ });
+"""
+
+from re import compile as re_compile
+
+from ansible.plugins.become import BecomeBase
+from ansible.module_utils.common.text.converters import to_bytes
+
+ansi_color_codes = re_compile(to_bytes(r"\x1B\[[0-9;]+m"))
+
+
+class BecomeModule(BecomeBase):
+
+ name = "community.general.run0"
+
+ prompt = "Password: "
+ fail = ("==== AUTHENTICATION FAILED ====",)
+ success = ("==== AUTHENTICATION COMPLETE ====",)
+ require_tty = (
+ True # see https://github.com/ansible-collections/community.general/issues/6932
+ )
+
+ @staticmethod
+ def remove_ansi_codes(line):
+ return ansi_color_codes.sub(b"", line)
+
+ def build_become_command(self, cmd, shell):
+ super().build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become = self.get_option("become_exe")
+ flags = self.get_option("become_flags")
+ user = self.get_option("become_user")
+
+ return (
+ f"{become} --user={user} {flags} {self._build_success_command(cmd, shell)}"
+ )
+
+ def check_success(self, b_output):
+ b_output = self.remove_ansi_codes(b_output)
+ return super().check_success(b_output)
+
+ def check_incorrect_password(self, b_output):
+ b_output = self.remove_ansi_codes(b_output)
+ return super().check_incorrect_password(b_output)
+
+ def check_missing_password(self, b_output):
+ b_output = self.remove_ansi_codes(b_output)
+ return super().check_missing_password(b_output)
diff --git a/plugins/become/sesu.py b/plugins/become/sesu.py
index 751163d19e..ecd29c83c5 100644
--- a/plugins/become/sesu.py
+++ b/plugins/become/sesu.py
@@ -1,73 +1,75 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: sesu
- short_description: CA Privileged Access Manager
- description:
- - This become plugins allows your remote/login user to execute commands as another user via the sesu utility.
- author: ansible (@nekonyuu)
- options:
- become_user:
- description: User you 'become' to execute the task
- default: ''
- ini:
- - section: privilege_escalation
- key: become_user
- - section: sesu_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_sesu_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_SESU_USER
- become_exe:
- description: sesu executable
- default: sesu
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: sesu_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_sesu_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_SESU_EXE
- become_flags:
- description: Options to pass to sesu
- default: -H -S -n
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: sesu_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_sesu_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_SESU_FLAGS
- become_pass:
- description: Password to pass to sesu
- required: False
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_sesu_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_SESU_PASS
- ini:
- - section: sesu_become_plugin
- key: password
-'''
+DOCUMENTATION = r"""
+name: sesu
+short_description: CA Privileged Access Manager
+description:
+ - This become plugins allows your remote/login user to execute commands as another user using the C(sesu) utility.
+author: ansible (@nekonyuu)
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: sesu_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_sesu_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SESU_USER
+ become_exe:
+ description: C(sesu) executable.
+ type: string
+ default: sesu
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: sesu_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_sesu_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_SESU_EXE
+ become_flags:
+ description: Options to pass to C(sesu).
+ type: string
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: sesu_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_sesu_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SESU_FLAGS
+ become_pass:
+ description: Password to pass to C(sesu).
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_sesu_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SESU_PASS
+ ini:
+ - section: sesu_become_plugin
+ key: password
+"""
from ansible.plugins.become import BecomeBase
@@ -89,4 +91,4 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags')
user = self.get_option('become_user')
- return '%s %s %s -c %s' % (become, flags, user, self._build_success_command(cmd, shell))
+ return f'{become} {flags} {user} -c {self._build_success_command(cmd, shell)}'
diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py
index 60bb2aa517..3b5d4d8b7f 100644
--- a/plugins/become/sudosu.py
+++ b/plugins/become/sudosu.py
@@ -1,60 +1,77 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = """
- name: sudosu
- short_description: Run tasks using sudo su -
+DOCUMENTATION = r"""
+name: sudosu
+short_description: Run tasks using sudo su -
+description:
+ - This become plugin allows your remote/login user to execute commands as another user using the C(sudo) and C(su) utilities
+ combined.
+author:
+ - Dag Wieers (@dagwieers)
+version_added: 2.4.0
+options:
+ become_user:
+ description: User you 'become' to execute the task.
+ type: string
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: sudo_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_sudo_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SUDO_USER
+ become_flags:
+ description: Options to pass to C(sudo).
+ type: string
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: sudo_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_sudo_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SUDO_FLAGS
+ become_pass:
+ description: Password to pass to C(sudo).
+ type: string
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_sudo_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SUDO_PASS
+ ini:
+ - section: sudo_become_plugin
+ key: password
+ alt_method:
description:
- - This become plugin allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined.
- author:
- - Dag Wieers (@dagwieers)
- version_added: 2.4.0
- options:
- become_user:
- description: User you 'become' to execute the task.
- default: root
- ini:
- - section: privilege_escalation
- key: become_user
- - section: sudo_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_sudo_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_SUDO_USER
- become_flags:
- description: Options to pass to C(sudo).
- default: -H -S -n
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: sudo_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_sudo_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_SUDO_FLAGS
- become_pass:
- description: Password to pass to C(sudo).
- required: false
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_sudo_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_SUDO_PASS
- ini:
- - section: sudo_become_plugin
- key: password
+ - Whether to use an alternative method to call C(su). Instead of running C(su -l user /path/to/shell -c command), it
+ runs C(su -l user -c command).
+ - Use this when the default one is not working on your system.
+ required: false
+ type: boolean
+ ini:
+ - section: community.general.sudosu
+ key: alternative_method
+ vars:
+ - name: ansible_sudosu_alt_method
+ env:
+ - name: ANSIBLE_SUDOSU_ALT_METHOD
+ version_added: 9.2.0
"""
@@ -80,13 +97,16 @@ class BecomeModule(BecomeBase):
flags = self.get_option('become_flags') or ''
prompt = ''
if self.get_option('become_pass'):
- self.prompt = '[sudo via ansible, key=%s] password:' % self._id
+ self.prompt = f'[sudo via ansible, key={self._id}] password:'
if flags: # this could be simplified, but kept as is for now for backwards string matching
flags = flags.replace('-n', '')
- prompt = '-p "%s"' % (self.prompt)
+ prompt = f'-p "{self.prompt}"'
user = self.get_option('become_user') or ''
if user:
- user = '%s' % (user)
+ user = f'{user}'
- return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)])
+ if self.get_option('alt_method'):
+ return f"{becomecmd} {flags} {prompt} su -l {user} -c {self._build_success_command(cmd, shell, True)}"
+ else:
+ return f"{becomecmd} {flags} {prompt} su -l {user} {self._build_success_command(cmd, shell)}"
diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py
index 77f1717e45..28011e8cab 100644
--- a/plugins/cache/memcached.py
+++ b/plugins/cache/memcached.py
@@ -1,50 +1,50 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2014, Brian Coca, Josh Drake, et al
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: memcached
- short_description: Use memcached DB for cache
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: memcached
+short_description: Use memcached DB for cache
+description:
+ - This cache uses JSON formatted, per host records saved in memcached.
+requirements:
+ - memcache (python lib)
+options:
+ _uri:
description:
- - This cache uses JSON formatted, per host records saved in memcached.
- requirements:
- - memcache (python lib)
- options:
- _uri:
- description:
- - List of connection information for the memcached DBs
- default: ['127.0.0.1:11211']
- type: list
- elements: string
- env:
- - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
- ini:
- - key: fact_caching_connection
- section: defaults
- _prefix:
- description: User defined prefix to use when creating the DB entries
- default: ansible_facts
- env:
- - name: ANSIBLE_CACHE_PLUGIN_PREFIX
- ini:
- - key: fact_caching_prefix
- section: defaults
- _timeout:
- default: 86400
- description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
- env:
- - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
- ini:
- - key: fact_caching_timeout
- section: defaults
- type: integer
-'''
+ - List of connection information for the memcached DBs.
+ default: ['127.0.0.1:11211']
+ type: list
+ elements: string
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the DB entries.
+ type: string
+ default: ansible_facts
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ type: integer
+ # TODO: determine whether it is OK to change to: type: float
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire.
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+"""
import collections
import os
@@ -52,11 +52,9 @@ import time
from multiprocessing import Lock
from itertools import chain
-from ansible import constants as C
from ansible.errors import AnsibleError
-from ansible.module_utils.common._collections_compat import MutableSet
+from collections.abc import MutableSet
from ansible.plugins.cache import BaseCacheModule
-from ansible.release import __version__ as ansible_base_version
from ansible.utils.display import Display
try:
@@ -191,7 +189,7 @@ class CacheModule(BaseCacheModule):
self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or [])
def _make_key(self, key):
- return "{0}{1}".format(self._prefix, key)
+ return f"{self._prefix}{key}"
def _expire_keys(self):
if self._timeout > 0:
diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py
index 10295bb5d2..6c053138c8 100644
--- a/plugins/cache/pickle.py
+++ b/plugins/cache/pickle.py
@@ -1,52 +1,49 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017, Brian Coca
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: pickle
- short_description: Pickle formatted files.
+DOCUMENTATION = r"""
+name: pickle
+short_description: Pickle formatted files
+description:
+ - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem.
+author: Brian Coca (@bcoca)
+options:
+ _uri:
+ required: true
description:
- - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem.
- author: Brian Coca (@bcoca)
- options:
- _uri:
- required: True
- description:
- - Path in which the cache plugin will save the files
- env:
- - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
- ini:
- - key: fact_caching_connection
- section: defaults
- _prefix:
- description: User defined prefix to use when creating the files
- env:
- - name: ANSIBLE_CACHE_PLUGIN_PREFIX
- ini:
- - key: fact_caching_prefix
- section: defaults
- _timeout:
- default: 86400
- description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
- env:
- - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
- ini:
- - key: fact_caching_timeout
- section: defaults
-'''
+ - Path in which the cache plugin saves the files.
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ type: path
+ _prefix:
+ description: User defined prefix to use when creating the files.
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ type: string
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire.
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: float
+"""
-try:
- import cPickle as pickle
-except ImportError:
- import pickle
+import pickle
-from ansible.module_utils.six import PY3
from ansible.plugins.cache import BaseFileCacheModule
@@ -54,14 +51,12 @@ class CacheModule(BaseFileCacheModule):
"""
A caching module backed by pickle files.
"""
+ _persistent = False # prevent unnecessary JSON serialization and key munging
def _load(self, filepath):
# Pickle is a binary format
with open(filepath, 'rb') as f:
- if PY3:
- return pickle.load(f, encoding='bytes')
- else:
- return pickle.load(f)
+ return pickle.load(f, encoding='bytes')
def _dump(self, value, filepath):
with open(filepath, 'wb') as f:
diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py
index 121f9b22f4..d7b596bb32 100644
--- a/plugins/cache/redis.py
+++ b/plugins/cache/redis.py
@@ -1,78 +1,78 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2014, Brian Coca, Josh Drake, et al
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: redis
- short_description: Use Redis DB for cache
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: redis
+short_description: Use Redis DB for cache
+description:
+ - This cache uses JSON formatted, per host records saved in Redis.
+requirements:
+ - redis>=2.4.5 (python lib)
+options:
+ _uri:
description:
- - This cache uses JSON formatted, per host records saved in Redis.
- requirements:
- - redis>=2.4.5 (python lib)
- options:
- _uri:
- description:
- - A colon separated string of connection information for Redis.
- - The format is C(host:port:db:password), for example C(localhost:6379:0:changeme).
- - To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme).
- - To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0.
- required: True
- env:
- - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
- ini:
- - key: fact_caching_connection
- section: defaults
- _prefix:
- description: User defined prefix to use when creating the DB entries
- default: ansible_facts
- env:
- - name: ANSIBLE_CACHE_PLUGIN_PREFIX
- ini:
- - key: fact_caching_prefix
- section: defaults
- _keyset_name:
- description: User defined name for cache keyset name.
- default: ansible_cache_keys
- env:
- - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME
- ini:
- - key: fact_caching_redis_keyset_name
- section: defaults
- version_added: 1.3.0
- _sentinel_service_name:
- description: The redis sentinel service name (or referenced as cluster name).
- env:
- - name: ANSIBLE_CACHE_REDIS_SENTINEL
- ini:
- - key: fact_caching_redis_sentinel
- section: defaults
- version_added: 1.3.0
- _timeout:
- default: 86400
- description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
- env:
- - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
- ini:
- - key: fact_caching_timeout
- section: defaults
- type: integer
-'''
+ - A colon separated string of connection information for Redis.
+ - The format is V(host:port:db:password), for example V(localhost:6379:0:changeme).
+ - To use encryption in transit, prefix the connection with V(tls://), as in V(tls://localhost:6379:0:changeme).
+ - To use redis sentinel, use separator V(;), for example V(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0.
+ type: string
+ required: true
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the DB entries.
+ type: string
+ default: ansible_facts
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _keyset_name:
+ description: User defined name for cache keyset name.
+ type: string
+ default: ansible_cache_keys
+ env:
+ - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME
+ ini:
+ - key: fact_caching_redis_keyset_name
+ section: defaults
+ version_added: 1.3.0
+ _sentinel_service_name:
+ description: The redis sentinel service name (or referenced as cluster name).
+ type: string
+ env:
+ - name: ANSIBLE_CACHE_REDIS_SENTINEL
+ ini:
+ - key: fact_caching_redis_sentinel
+ section: defaults
+ version_added: 1.3.0
+ _timeout:
+ default: 86400
+ type: integer
+ # TODO: determine whether it is OK to change to: type: float
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire.
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+"""
import re
import time
import json
-from ansible import constants as C
from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_native
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
from ansible.plugins.cache import BaseCacheModule
-from ansible.release import __version__ as ansible_base_version
from ansible.utils.display import Display
try:
@@ -128,7 +128,7 @@ class CacheModule(BaseCacheModule):
connection = self._parse_connection(self.re_url_conn, uri)
self._db = StrictRedis(*connection, **kw)
- display.vv('Redis connection: %s' % self._db)
+ display.vv(f'Redis connection: {self._db}')
@staticmethod
def _parse_connection(re_patt, uri):
@@ -152,7 +152,7 @@ class CacheModule(BaseCacheModule):
# format: "localhost:26379;localhost2:26379;0:changeme"
connections = uri.split(';')
connection_args = connections.pop(-1)
- if len(connection_args) > 0: # hanle if no db nr is given
+ if len(connection_args) > 0: # handle if no db nr is given
connection_args = connection_args.split(':')
kw['db'] = connection_args.pop(0)
try:
@@ -161,12 +161,12 @@ class CacheModule(BaseCacheModule):
pass # password is optional
sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections]
- display.vv('\nUsing redis sentinels: %s' % sentinels)
+ display.vv(f'\nUsing redis sentinels: {sentinels}')
scon = Sentinel(sentinels, **kw)
try:
return scon.master_for(self._sentinel_service_name, socket_timeout=0.2)
except Exception as exc:
- raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc))
+ raise AnsibleError(f'Could not connect to redis sentinel: {exc}')
def _make_key(self, key):
return self._prefix + key
@@ -224,7 +224,7 @@ class CacheModule(BaseCacheModule):
def copy(self):
# TODO: there is probably a better way to do this in redis
- ret = dict([(k, self.get(k)) for k in self.keys()])
+ ret = {k: self.get(k) for k in self.keys()}
return ret
def __getstate__(self):
diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py
index 08620816b6..52cbf887de 100644
--- a/plugins/cache/yaml.py
+++ b/plugins/cache/yaml.py
@@ -1,49 +1,49 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017, Brian Coca
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: yaml
- short_description: YAML formatted files.
+DOCUMENTATION = r"""
+name: yaml
+short_description: YAML formatted files
+description:
+ - This cache uses YAML formatted, per host, files saved to the filesystem.
+author: Brian Coca (@bcoca)
+options:
+ _uri:
+ required: true
description:
- - This cache uses YAML formatted, per host, files saved to the filesystem.
- author: Brian Coca (@bcoca)
- options:
- _uri:
- required: True
- description:
- - Path in which the cache plugin will save the files
- env:
- - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
- ini:
- - key: fact_caching_connection
- section: defaults
- _prefix:
- description: User defined prefix to use when creating the files
- env:
- - name: ANSIBLE_CACHE_PLUGIN_PREFIX
- ini:
- - key: fact_caching_prefix
- section: defaults
- _timeout:
- default: 86400
- description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
- env:
- - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
- ini:
- - key: fact_caching_timeout
- section: defaults
- type: integer
-'''
+ - Path in which the cache plugin saves the files.
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ type: string
+ _prefix:
+ description: User defined prefix to use when creating the files.
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ type: string
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire.
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+ # TODO: determine whether it is OK to change to: type: float
+"""
-
-import codecs
+import os
import yaml
@@ -58,9 +58,9 @@ class CacheModule(BaseFileCacheModule):
"""
def _load(self, filepath):
- with codecs.open(filepath, 'r', encoding='utf-8') as f:
+ with open(os.path.abspath(filepath), 'r', encoding='utf-8') as f:
return AnsibleLoader(f).get_single_data()
def _dump(self, value, filepath):
- with codecs.open(filepath, 'w', encoding='utf-8') as f:
+ with open(os.path.abspath(filepath), 'w', encoding='utf-8') as f:
yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
diff --git a/plugins/callback/cgroup_memory_recap.py b/plugins/callback/cgroup_memory_recap.py
index a894336c8f..294ee4b378 100644
--- a/plugins/callback/cgroup_memory_recap.py
+++ b/plugins/callback/cgroup_memory_recap.py
@@ -1,44 +1,45 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018 Matt Martz
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: cgroup_memory_recap
- type: aggregate
- requirements:
- - whitelist in configuration
- - cgroups
- short_description: Profiles maximum memory usage of tasks and full execution using cgroups
- description:
- - This is an ansible callback plugin that profiles maximum memory usage of ansible and individual tasks, and displays a recap at the end using cgroups
- notes:
- - Requires ansible to be run from within a cgroup, such as with C(cgexec -g memory:ansible_profile ansible-playbook ...)
- - This cgroup should only be used by ansible to get accurate results
- - To create the cgroup, first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile)
- options:
- max_mem_file:
- required: True
- description: Path to cgroups C(memory.max_usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes)
- env:
- - name: CGROUP_MAX_MEM_FILE
- ini:
- - section: callback_cgroupmemrecap
- key: max_mem_file
- cur_mem_file:
- required: True
- description: Path to C(memory.usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes)
- env:
- - name: CGROUP_CUR_MEM_FILE
- ini:
- - section: callback_cgroupmemrecap
- key: cur_mem_file
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: cgroup_memory_recap
+type: aggregate
+requirements:
+ - whitelist in configuration
+ - cgroups
+short_description: Profiles maximum memory usage of tasks and full execution using cgroups
+description:
+ - This is an Ansible callback plugin that profiles maximum memory usage of Ansible and individual tasks, and displays a
+ recap at the end using cgroups.
+notes:
+ - Requires ansible to be run from within a C(cgroup), such as with C(cgexec -g memory:ansible_profile ansible-playbook ...).
+ - This C(cgroup) should only be used by Ansible to get accurate results.
+ - To create the C(cgroup), first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile).
+options:
+ max_mem_file:
+ required: true
+ description: Path to cgroups C(memory.max_usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes).
+ type: str
+ env:
+ - name: CGROUP_MAX_MEM_FILE
+ ini:
+ - section: callback_cgroupmemrecap
+ key: max_mem_file
+ cur_mem_file:
+ required: true
+ description: Path to C(memory.usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes).
+ type: str
+ env:
+ - name: CGROUP_CUR_MEM_FILE
+ ini:
+ - section: callback_cgroupmemrecap
+ key: cur_mem_file
+"""
import time
import threading
@@ -112,7 +113,7 @@ class CallbackModule(CallbackBase):
max_results = int(f.read().strip()) / 1024 / 1024
self._display.banner('CGROUP MEMORY RECAP')
- self._display.display('Execution Maximum: %0.2fMB\n\n' % max_results)
+ self._display.display(f'Execution Maximum: {max_results:0.2f}MB\n\n')
for task, memory in self.task_results:
- self._display.display('%s (%s): %0.2fMB' % (task.get_name(), task._uuid, memory))
+ self._display.display(f'{task.get_name()} ({task._uuid}): {memory:0.2f}MB')
diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py
index 9c3c9c5afc..f390a947a4 100644
--- a/plugins/callback/context_demo.py
+++ b/plugins/callback/context_demo.py
@@ -1,23 +1,21 @@
-# -*- coding: utf-8 -*-
# Copyright (C) 2012, Michael DeHaan,
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: context_demo
- type: aggregate
- short_description: demo callback that adds play/task context
- description:
- - Displays some play and task context along with normal output
- - This is mostly for demo purposes
- requirements:
- - whitelist in configuration
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: context_demo
+type: aggregate
+short_description: Demo callback that adds play/task context
+description:
+ - Displays some play and task context along with normal output.
+ - This is mostly for demo purposes.
+requirements:
+ - whitelist in configuration
+"""
from ansible.plugins.callback import CallbackBase
@@ -38,15 +36,15 @@ class CallbackModule(CallbackBase):
self.play = None
def v2_on_any(self, *args, **kwargs):
- self._display.display("--- play: {0} task: {1} ---".format(getattr(self.play, 'name', None), self.task))
+ self._display.display(f"--- play: {getattr(self.play, 'name', None)} task: {self.task} ---")
self._display.display(" --- ARGS ")
for i, a in enumerate(args):
- self._display.display(' %s: %s' % (i, a))
+ self._display.display(f' {i}: {a}')
self._display.display(" --- KWARGS ")
for k in kwargs:
- self._display.display(' %s: %s' % (k, kwargs[k]))
+ self._display.display(f' {k}: {kwargs[k]}')
def v2_playbook_on_play_start(self, play):
self.play = play
diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py
index e0e040c9d4..d5fe334a49 100644
--- a/plugins/callback/counter_enabled.py
+++ b/plugins/callback/counter_enabled.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Ivan Aragones Muniesa
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
@@ -6,28 +5,26 @@
Counter enabled Ansible callback plugin (See DOCUMENTATION for more information)
'''
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: counter_enabled
- type: stdout
- short_description: adds counters to the output items (tasks and hosts/task)
- description:
- - Use this callback when you need a kind of progress bar on a large environments.
- - You will know how many tasks has the playbook to run, and which one is actually running.
- - You will know how many hosts may run a task, and which of them is actually running.
- extends_documentation_fragment:
- - default_callback
- requirements:
- - set as stdout callback in ansible.cfg (stdout_callback = counter_enabled)
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: counter_enabled
+type: stdout
+short_description: Adds counters to the output items (tasks and hosts/task)
+description:
+ - Use this callback when you need a kind of progress bar in large environments.
+ - You can see how many tasks the playbook has to run, and which one is actually running.
+ - You can see how many hosts may run a task, and which of them is actually running.
+extends_documentation_fragment:
+ - default_callback
+requirements:
+ - set as stdout callback in C(ansible.cfg) (C(stdout_callback = counter_enabled))
+"""
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
-from ansible.template import Templar
from ansible.playbook.task_include import TaskInclude
@@ -70,9 +67,9 @@ class CallbackModule(CallbackBase):
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if not name:
- msg = u"play"
+ msg = "play"
else:
- msg = u"PLAY [%s]" % name
+ msg = f"PLAY [{name}]"
self._play = play
@@ -92,25 +89,17 @@ class CallbackModule(CallbackBase):
for host in hosts:
stat = stats.summarize(host)
- self._display.display(u"%s : %s %s %s %s %s %s" % (
- hostcolor(host, stat),
- colorize(u'ok', stat['ok'], C.COLOR_OK),
- colorize(u'changed', stat['changed'], C.COLOR_CHANGED),
- colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE),
- colorize(u'failed', stat['failures'], C.COLOR_ERROR),
- colorize(u'rescued', stat['rescued'], C.COLOR_OK),
- colorize(u'ignored', stat['ignored'], C.COLOR_WARN)),
+ self._display.display(
+ f"{hostcolor(host, stat)} : {colorize('ok', stat['ok'], C.COLOR_OK)} {colorize('changed', stat['changed'], C.COLOR_CHANGED)} "
+ f"{colorize('unreachable', stat['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', stat['failures'], C.COLOR_ERROR)} "
+ f"{colorize('rescued', stat['rescued'], C.COLOR_OK)} {colorize('ignored', stat['ignored'], C.COLOR_WARN)}",
screen_only=True
)
- self._display.display(u"%s : %s %s %s %s %s %s" % (
- hostcolor(host, stat, False),
- colorize(u'ok', stat['ok'], None),
- colorize(u'changed', stat['changed'], None),
- colorize(u'unreachable', stat['unreachable'], None),
- colorize(u'failed', stat['failures'], None),
- colorize(u'rescued', stat['rescued'], None),
- colorize(u'ignored', stat['ignored'], None)),
+ self._display.display(
+ f"{hostcolor(host, stat, False)} : {colorize('ok', stat['ok'], None)} {colorize('changed', stat['changed'], None)} "
+ f"{colorize('unreachable', stat['unreachable'], None)} {colorize('failed', stat['failures'], None)} "
+ f"{colorize('rescued', stat['rescued'], None)} {colorize('ignored', stat['ignored'], None)}",
log_only=True
)
@@ -125,12 +114,14 @@ class CallbackModule(CallbackBase):
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
- self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+ _custom_stats = self._dump_results(stats.custom[k], indent=1).replace('\n', '')
+ self._display.display(f'\t{k}: {_custom_stats}')
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
- self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ _custom_stats_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')
+ self._display.display(f'\tRUN: {_custom_stats_run}')
self._display.display("", screen_only=True)
def v2_playbook_on_task_start(self, task, is_conditional):
@@ -144,13 +135,13 @@ class CallbackModule(CallbackBase):
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
- args = ', '.join(('%s=%s' % a for a in task.args.items()))
- args = ' %s' % args
- self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args))
+ args = ', '.join(('{k}={v}' for k, v in task.args.items()))
+ args = f' {args}'
+ self._display.banner(f"TASK {self._task_counter}/{self._task_total} [{task.get_name().strip()}{args}]")
if self._display.verbosity >= 2:
path = task.get_path()
if path:
- self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
+ self._display.display(f"task path: {path}", color=C.COLOR_DEBUG)
self._host_counter = self._previous_batch_total
self._task_counter += 1
@@ -167,15 +158,15 @@ class CallbackModule(CallbackBase):
return
elif result._result.get('changed', False):
if delegated_vars:
- msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+ msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]"
else:
- msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
color = C.COLOR_CHANGED
else:
if delegated_vars:
- msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+ msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]"
else:
- msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
color = C.COLOR_OK
self._handle_warnings(result._result)
@@ -186,7 +177,7 @@ class CallbackModule(CallbackBase):
self._clean_results(result._result, result._task.action)
if self._run_is_verbose(result):
- msg += " => %s" % (self._dump_results(result._result),)
+ msg += f" => {self._dump_results(result._result)}"
self._display.display(msg, color=color)
def v2_runner_on_failed(self, result, ignore_errors=False):
@@ -207,14 +198,16 @@ class CallbackModule(CallbackBase):
else:
if delegated_vars:
- self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total,
- result._host.get_name(), delegated_vars['ansible_host'],
- self._dump_results(result._result)),
- color=C.COLOR_ERROR)
+ self._display.display(
+ f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> "
+ f"{delegated_vars['ansible_host']}]: FAILED! => {self._dump_results(result._result)}",
+ color=C.COLOR_ERROR
+ )
else:
- self._display.display("fatal: %d/%d [%s]: FAILED! => %s" % (self._host_counter, self._host_total,
- result._host.get_name(), self._dump_results(result._result)),
- color=C.COLOR_ERROR)
+ self._display.display(
+ f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: FAILED! => {self._dump_results(result._result)}",
+ color=C.COLOR_ERROR
+ )
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
@@ -232,9 +225,9 @@ class CallbackModule(CallbackBase):
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
- msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ msg = f"skipping: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
if self._run_is_verbose(result):
- msg += " => %s" % self._dump_results(result._result)
+ msg += f" => {self._dump_results(result._result)}"
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
@@ -245,11 +238,13 @@ class CallbackModule(CallbackBase):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
- self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
- result._host.get_name(), delegated_vars['ansible_host'],
- self._dump_results(result._result)),
- color=C.COLOR_UNREACHABLE)
+ self._display.display(
+ f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> "
+ f"{delegated_vars['ansible_host']}]: UNREACHABLE! => {self._dump_results(result._result)}",
+ color=C.COLOR_UNREACHABLE
+ )
else:
- self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
- result._host.get_name(), self._dump_results(result._result)),
- color=C.COLOR_UNREACHABLE)
+ self._display.display(
+ f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: UNREACHABLE! => {self._dump_results(result._result)}",
+ color=C.COLOR_UNREACHABLE
+ )
diff --git a/plugins/callback/default_without_diff.py b/plugins/callback/default_without_diff.py
new file mode 100644
index 0000000000..b0315829b5
--- /dev/null
+++ b/plugins/callback/default_without_diff.py
@@ -0,0 +1,43 @@
+
+# Copyright (c) 2024, Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: default_without_diff
+type: stdout
+short_description: The default ansible callback without diff output
+version_added: 8.4.0
+description:
+ - This is basically the default ansible callback plugin (P(ansible.builtin.default#callback)) without showing diff output.
+ This can be useful when using another callback which sends more detailed information to another service, like the L(ARA,
+ https://ara.recordsansible.org/) callback, and you want diff output sent to that plugin but not shown on the console output.
+author: Felix Fontein (@felixfontein)
+extends_documentation_fragment:
+ - ansible.builtin.default_callback
+ - ansible.builtin.result_format_callback
+"""
+
+EXAMPLES = r"""
+# Enable callback in ansible.cfg:
+ansible_config: |
+ [defaults]
+ stdout_callback = community.general.default_without_diff
+
+# Enable callback with environment variables:
+environment_variable: |-
+ ANSIBLE_STDOUT_CALLBACK=community.general.default_without_diff
+"""
+
+from ansible.plugins.callback.default import CallbackModule as Default
+
+
+class CallbackModule(Default):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.default_without_diff'
+
+ def v2_on_file_diff(self, result):
+ pass
diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py
index 18e4f162ff..de50d97ce1 100644
--- a/plugins/callback/dense.py
+++ b/plugins/callback/dense.py
@@ -1,25 +1,23 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2016, Dag Wieers
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
name: dense
type: stdout
-short_description: minimal stdout output
+short_description: Minimal stdout output
extends_documentation_fragment:
-- default_callback
+ - default_callback
description:
-- When in verbose mode it will act the same as the default callback
+ - When in verbose mode it acts the same as the default callback.
author:
-- Dag Wieers (@dagwieers)
+ - Dag Wieers (@dagwieers)
requirements:
-- set as stdout in configuration
-'''
+ - set as stdout in configuration
+"""
HAS_OD = False
try:
@@ -28,8 +26,7 @@ try:
except ImportError:
pass
-from ansible.module_utils.six import binary_type, text_type
-from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
+from collections.abc import MutableMapping, MutableSequence
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
from ansible.utils.color import colorize, hostcolor
from ansible.utils.display import Display
@@ -195,7 +192,7 @@ class CallbackModule(CallbackModule_default):
self.disabled = True
def __del__(self):
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
def _add_host(self, result, status):
name = result._host.get_name()
@@ -237,13 +234,13 @@ class CallbackModule(CallbackModule_default):
# Remove empty attributes (list, dict, str)
for attr in result.copy():
- if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)):
+ if isinstance(result[attr], (MutableSequence, MutableMapping, bytes, str)):
if not result[attr]:
del result[attr]
def _handle_exceptions(self, result):
if 'exception' in result:
- # Remove the exception from the result so it's not shown every time
+ # Remove the exception from the result so it is not shown every time
del result['exception']
if self._display.verbosity == 1:
@@ -252,7 +249,7 @@ class CallbackModule(CallbackModule_default):
def _display_progress(self, result=None):
# Always rewrite the complete line
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline)
- sys.stdout.write('%s %d:' % (self.type, self.count[self.type]))
+ sys.stdout.write(f'{self.type} {self.count[self.type]}:')
sys.stdout.write(vt100.reset)
sys.stdout.flush()
@@ -260,22 +257,18 @@ class CallbackModule(CallbackModule_default):
for name in self.hosts:
sys.stdout.write(' ')
if self.hosts[name].get('delegate', None):
- sys.stdout.write(self.hosts[name]['delegate'] + '>')
+ sys.stdout.write(f"{self.hosts[name]['delegate']}>")
sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset)
sys.stdout.flush()
-# if result._result.get('diff', False):
-# sys.stdout.write('\n' + vt100.linewrap)
sys.stdout.write(vt100.linewrap)
-# self.keep = True
-
def _display_task_banner(self):
if not self.shown_title:
self.shown_title = True
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
- sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip()))
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f'{self.type} {self.count[self.type]}: {self.task.get_name().strip()}')
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
sys.stdout.flush()
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
@@ -284,7 +277,7 @@ class CallbackModule(CallbackModule_default):
def _display_results(self, result, status):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
@@ -309,16 +302,16 @@ class CallbackModule(CallbackModule_default):
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
- sys.stdout.write(colors[status] + status + ': ')
+ sys.stdout.write(f"{colors[status] + status}: ")
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
- sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host'])
+ sys.stdout.write(f"{vt100.reset}{result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}")
else:
sys.stdout.write(result._host.get_name())
- sys.stdout.write(': ' + dump + '\n')
- sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
+ sys.stdout.write(f": {dump}\n")
+ sys.stdout.write(f"{vt100.reset}{vt100.save}{vt100.clearline}")
sys.stdout.flush()
if status == 'changed':
@@ -327,7 +320,7 @@ class CallbackModule(CallbackModule_default):
def v2_playbook_on_play_start(self, play):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.bold}")
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold)
@@ -341,14 +334,14 @@ class CallbackModule(CallbackModule_default):
name = play.get_name().strip()
if not name:
name = 'unnamed'
- sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper()))
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"PLAY {self.count['play']}: {name.upper()}")
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
sys.stdout.flush()
def v2_playbook_on_task_start(self, task, is_conditional):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}")
else:
# Do not clear line, since we want to retain the previous output
sys.stdout.write(vt100.restore + vt100.reset + vt100.underline)
@@ -365,14 +358,14 @@ class CallbackModule(CallbackModule_default):
self.count['task'] += 1
# Write the next task on screen (behind the prompt is the previous output)
- sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
+ sys.stdout.write(f'{self.type} {self.count[self.type]}.')
sys.stdout.write(vt100.reset)
sys.stdout.flush()
def v2_playbook_on_handler_task_start(self, task):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}")
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
@@ -388,7 +381,7 @@ class CallbackModule(CallbackModule_default):
self.count[self.type] += 1
# Write the next task on screen (behind the prompt is the previous output)
- sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
+ sys.stdout.write(f'{self.type} {self.count[self.type]}.')
sys.stdout.write(vt100.reset)
sys.stdout.flush()
@@ -451,13 +444,13 @@ class CallbackModule(CallbackModule_default):
def v2_playbook_on_no_hosts_remaining(self):
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
- sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT')
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.white + vt100.redbg}NO MORE HOSTS LEFT")
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
sys.stdout.flush()
def v2_playbook_on_include(self, included_file):
@@ -465,7 +458,7 @@ class CallbackModule(CallbackModule_default):
def v2_playbook_on_stats(self, stats):
if self._display.verbosity == 0 and self.keep:
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
@@ -476,22 +469,16 @@ class CallbackModule(CallbackModule_default):
sys.stdout.write(vt100.bold + vt100.underline)
sys.stdout.write('SUMMARY')
- sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}")
sys.stdout.flush()
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(
- u"%s : %s %s %s %s %s %s" % (
- hostcolor(h, t),
- colorize(u'ok', t['ok'], C.COLOR_OK),
- colorize(u'changed', t['changed'], C.COLOR_CHANGED),
- colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
- colorize(u'failed', t['failures'], C.COLOR_ERROR),
- colorize(u'rescued', t['rescued'], C.COLOR_OK),
- colorize(u'ignored', t['ignored'], C.COLOR_WARN),
- ),
+ f"{hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} {colorize('changed', t['changed'], C.COLOR_CHANGED)} "
+ f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} "
+ f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}",
screen_only=True
)
diff --git a/plugins/callback/diy.py b/plugins/callback/diy.py
index ed194b5cb8..c94fe25093 100644
--- a/plugins/callback/diy.py
+++ b/plugins/callback/diy.py
@@ -1,608 +1,601 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2019, Trevor Highfill
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = r'''
- name: diy
- type: stdout
- short_description: Customize the output
- version_added: 0.2.0
- description:
- - Callback plugin that allows you to supply your own custom callback templates to be output.
- author: Trevor Highfill (@theque5t)
- extends_documentation_fragment:
- - default_callback
- notes:
- - Uses the C(default) callback plugin output when a custom callback message(C(msg)) is not provided.
- - Makes the callback event data available via the C(ansible_callback_diy) dictionary, which can be used in the templating context for the options.
- The dictionary is only available in the templating context for the options. It is not a variable that is available via the other
- various execution contexts, such as playbook, play, task etc.
- - Options being set by their respective variable input can only be set using the variable if the variable was set in a context that is available to the
- respective callback.
- Use the C(ansible_callback_diy) dictionary to see what is available to a callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output
- the top level variable names available to the callback.
- - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For example,
- C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}")
- - "**Condition** for all C(msg) options:
- if value C(is None or omit),
- then the option is not being used.
- **Effect**: use of the C(default) callback plugin for output"
- - "**Condition** for all C(msg) options:
- if value C(is not None and not omit and length is not greater than 0),
- then the option is being used without output.
- **Effect**: suppress output"
- - "**Condition** for all C(msg) options:
- if value C(is not None and not omit and length is greater than 0),
- then the option is being used with output.
- **Effect**: render value as template and output"
- - "Valid color values: C(black), C(bright gray), C(blue), C(white), C(green), C(bright blue), C(cyan), C(bright green), C(red), C(bright cyan),
- C(purple), C(bright red), C(yellow), C(bright purple), C(dark gray), C(bright yellow), C(magenta), C(bright magenta), C(normal)"
- seealso:
- - name: default – default Ansible screen output
- description: The official documentation on the B(default) callback plugin.
- link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html
- requirements:
- - set as stdout_callback in configuration
- options:
- on_any_msg:
- description: Output to be used for callback on_any.
- ini:
- - section: callback_diy
- key: on_any_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG
- vars:
- - name: ansible_callback_diy_on_any_msg
- type: str
+DOCUMENTATION = r"""
+name: diy
+type: stdout
+short_description: Customize the output
+version_added: 0.2.0
+description:
+ - Callback plugin that allows you to supply your own custom callback templates to be output.
+author: Trevor Highfill (@theque5t)
+extends_documentation_fragment:
+ - default_callback
+notes:
+ - Uses the P(ansible.builtin.default#callback) callback plugin output when a custom callback V(message(msg\)) is not provided.
+ - Makes the callback event data available using the C(ansible_callback_diy) dictionary, which can be used in the templating
+ context for the options. The dictionary is only available in the templating context for the options. It is not a variable
+ that is available using the other various execution contexts, such as playbook, play, task, and so on.
+ - Options being set by their respective variable input can only be set using the variable if the variable was set in a context
+ that is available to the respective callback. Use the C(ansible_callback_diy) dictionary to see what is available to a
+ callback. Additionally, C(ansible_callback_diy.top_level_var_names) outputs the top level variable names available
+ to the callback.
+ - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For
+ example, V("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}").
+ - 'B(Condition) for all C(msg) options: if value V(is None or omit), then the option is not being used. B(Effect): use of
+ the C(default) callback plugin for output.'
+ - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is not greater than 0), then the
+ option is being used without output. B(Effect): suppress output.'
+ - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is greater than 0), then the option
+ is being used with output. B(Effect): render value as template and output.'
+ - 'Valid color values: V(black), V(bright gray), V(blue), V(white), V(green), V(bright blue), V(cyan), V(bright green),
+ V(red), V(bright cyan), V(purple), V(bright red), V(yellow), V(bright purple), V(dark gray), V(bright yellow), V(magenta),
+ V(bright magenta), V(normal).'
+seealso:
+ - name: default – default Ansible screen output
+ description: The official documentation on the B(default) callback plugin.
+ link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html
+requirements:
+ - set as stdout_callback in configuration
+options:
+ on_any_msg:
+ description: Output to be used for callback on_any.
+ ini:
+ - section: callback_diy
+ key: on_any_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG
+ vars:
+ - name: ansible_callback_diy_on_any_msg
+ type: str
- on_any_msg_color:
- description:
- - Output color to be used for I(on_any_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: on_any_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR
- vars:
- - name: ansible_callback_diy_on_any_msg_color
- type: str
+ on_any_msg_color:
+ description:
+ - Output color to be used for O(on_any_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: on_any_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_on_any_msg_color
+ type: str
- runner_on_failed_msg:
- description: Output to be used for callback runner_on_failed.
- ini:
- - section: callback_diy
- key: runner_on_failed_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG
- vars:
- - name: ansible_callback_diy_runner_on_failed_msg
- type: str
+ runner_on_failed_msg:
+ description: Output to be used for callback runner_on_failed.
+ ini:
+ - section: callback_diy
+ key: runner_on_failed_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_failed_msg
+ type: str
- runner_on_failed_msg_color:
- description:
- - Output color to be used for I(runner_on_failed_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: runner_on_failed_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR
- vars:
- - name: ansible_callback_diy_runner_on_failed_msg_color
- type: str
+ runner_on_failed_msg_color:
+ description:
+ - Output color to be used for O(runner_on_failed_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_failed_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_failed_msg_color
+ type: str
- runner_on_ok_msg:
- description: Output to be used for callback runner_on_ok.
- ini:
- - section: callback_diy
- key: runner_on_ok_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG
- vars:
- - name: ansible_callback_diy_runner_on_ok_msg
- type: str
+ runner_on_ok_msg:
+ description: Output to be used for callback runner_on_ok.
+ ini:
+ - section: callback_diy
+ key: runner_on_ok_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_ok_msg
+ type: str
- runner_on_ok_msg_color:
- description:
- - Output color to be used for I(runner_on_ok_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: runner_on_ok_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR
- vars:
- - name: ansible_callback_diy_runner_on_ok_msg_color
- type: str
+ runner_on_ok_msg_color:
+ description:
+ - Output color to be used for O(runner_on_ok_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_ok_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_ok_msg_color
+ type: str
- runner_on_skipped_msg:
- description: Output to be used for callback runner_on_skipped.
- ini:
- - section: callback_diy
- key: runner_on_skipped_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG
- vars:
- - name: ansible_callback_diy_runner_on_skipped_msg
- type: str
+ runner_on_skipped_msg:
+ description: Output to be used for callback runner_on_skipped.
+ ini:
+ - section: callback_diy
+ key: runner_on_skipped_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_skipped_msg
+ type: str
- runner_on_skipped_msg_color:
- description:
- - Output color to be used for I(runner_on_skipped_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: runner_on_skipped_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR
- vars:
- - name: ansible_callback_diy_runner_on_skipped_msg_color
- type: str
+ runner_on_skipped_msg_color:
+ description:
+ - Output color to be used for O(runner_on_skipped_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_skipped_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_skipped_msg_color
+ type: str
- runner_on_unreachable_msg:
- description: Output to be used for callback runner_on_unreachable.
- ini:
- - section: callback_diy
- key: runner_on_unreachable_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG
- vars:
- - name: ansible_callback_diy_runner_on_unreachable_msg
- type: str
+ runner_on_unreachable_msg:
+ description: Output to be used for callback runner_on_unreachable.
+ ini:
+ - section: callback_diy
+ key: runner_on_unreachable_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_unreachable_msg
+ type: str
- runner_on_unreachable_msg_color:
- description:
- - Output color to be used for I(runner_on_unreachable_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: runner_on_unreachable_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR
- vars:
- - name: ansible_callback_diy_runner_on_unreachable_msg_color
- type: str
+ runner_on_unreachable_msg_color:
+ description:
+ - Output color to be used for O(runner_on_unreachable_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_unreachable_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_unreachable_msg_color
+ type: str
- playbook_on_start_msg:
- description: Output to be used for callback playbook_on_start.
- ini:
- - section: callback_diy
- key: playbook_on_start_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG
- vars:
- - name: ansible_callback_diy_playbook_on_start_msg
- type: str
+ playbook_on_start_msg:
+ description: Output to be used for callback playbook_on_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_start_msg
+ type: str
- playbook_on_start_msg_color:
- description:
- - Output color to be used for I(playbook_on_start_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: playbook_on_start_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR
- vars:
- - name: ansible_callback_diy_playbook_on_start_msg_color
- type: str
+ playbook_on_start_msg_color:
+ description:
+ - Output color to be used for O(playbook_on_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_start_msg_color
+ type: str
- playbook_on_notify_msg:
- description: Output to be used for callback playbook_on_notify.
- ini:
- - section: callback_diy
- key: playbook_on_notify_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG
- vars:
- - name: ansible_callback_diy_playbook_on_notify_msg
- type: str
+ playbook_on_notify_msg:
+ description: Output to be used for callback playbook_on_notify.
+ ini:
+ - section: callback_diy
+ key: playbook_on_notify_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_notify_msg
+ type: str
- playbook_on_notify_msg_color:
- description:
- - Output color to be used for I(playbook_on_notify_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: playbook_on_notify_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR
- vars:
- - name: ansible_callback_diy_playbook_on_notify_msg_color
- type: str
+ playbook_on_notify_msg_color:
+ description:
+ - Output color to be used for O(playbook_on_notify_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_notify_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_notify_msg_color
+ type: str
- playbook_on_no_hosts_matched_msg:
- description: Output to be used for callback playbook_on_no_hosts_matched.
- ini:
- - section: callback_diy
- key: playbook_on_no_hosts_matched_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG
- vars:
- - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg
- type: str
+ playbook_on_no_hosts_matched_msg:
+ description: Output to be used for callback playbook_on_no_hosts_matched.
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_matched_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg
+ type: str
- playbook_on_no_hosts_matched_msg_color:
- description:
- - Output color to be used for I(playbook_on_no_hosts_matched_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: playbook_on_no_hosts_matched_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR
- vars:
- - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color
- type: str
+ playbook_on_no_hosts_matched_msg_color:
+ description:
+ - Output color to be used for O(playbook_on_no_hosts_matched_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_matched_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color
+ type: str
- playbook_on_no_hosts_remaining_msg:
- description: Output to be used for callback playbook_on_no_hosts_remaining.
- ini:
- - section: callback_diy
- key: playbook_on_no_hosts_remaining_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG
- vars:
- - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg
- type: str
+ playbook_on_no_hosts_remaining_msg:
+ description: Output to be used for callback playbook_on_no_hosts_remaining.
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_remaining_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg
+ type: str
- playbook_on_no_hosts_remaining_msg_color:
- description:
- - Output color to be used for I(playbook_on_no_hosts_remaining_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: playbook_on_no_hosts_remaining_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR
- vars:
- - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color
- type: str
+ playbook_on_no_hosts_remaining_msg_color:
+ description:
+ - Output color to be used for O(playbook_on_no_hosts_remaining_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_remaining_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color
+ type: str
- playbook_on_task_start_msg:
- description: Output to be used for callback playbook_on_task_start.
- ini:
- - section: callback_diy
- key: playbook_on_task_start_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG
- vars:
- - name: ansible_callback_diy_playbook_on_task_start_msg
- type: str
+ playbook_on_task_start_msg:
+ description: Output to be used for callback playbook_on_task_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_task_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_task_start_msg
+ type: str
- playbook_on_task_start_msg_color:
- description:
- - Output color to be used for I(playbook_on_task_start_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: playbook_on_task_start_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR
- vars:
- - name: ansible_callback_diy_playbook_on_task_start_msg_color
- type: str
+ playbook_on_task_start_msg_color:
+ description:
+ - Output color to be used for O(playbook_on_task_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_task_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_task_start_msg_color
+ type: str
- playbook_on_handler_task_start_msg:
- description: Output to be used for callback playbook_on_handler_task_start.
- ini:
- - section: callback_diy
- key: playbook_on_handler_task_start_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG
- vars:
- - name: ansible_callback_diy_playbook_on_handler_task_start_msg
- type: str
+ playbook_on_handler_task_start_msg:
+ description: Output to be used for callback playbook_on_handler_task_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_handler_task_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_handler_task_start_msg
+ type: str
- playbook_on_handler_task_start_msg_color:
- description:
- - Output color to be used for I(playbook_on_handler_task_start_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: playbook_on_handler_task_start_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR
- vars:
- - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color
- type: str
+ playbook_on_handler_task_start_msg_color:
+ description:
+ - Output color to be used for O(playbook_on_handler_task_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_handler_task_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color
+ type: str
- playbook_on_vars_prompt_msg:
- description: Output to be used for callback playbook_on_vars_prompt.
- ini:
- - section: callback_diy
- key: playbook_on_vars_prompt_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG
- vars:
- - name: ansible_callback_diy_playbook_on_vars_prompt_msg
- type: str
+ playbook_on_vars_prompt_msg:
+ description: Output to be used for callback playbook_on_vars_prompt.
+ ini:
+ - section: callback_diy
+ key: playbook_on_vars_prompt_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_vars_prompt_msg
+ type: str
- playbook_on_vars_prompt_msg_color:
- description:
- - Output color to be used for I(playbook_on_vars_prompt_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: playbook_on_vars_prompt_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR
- vars:
- - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color
- type: str
+ playbook_on_vars_prompt_msg_color:
+ description:
+ - Output color to be used for O(playbook_on_vars_prompt_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_vars_prompt_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color
+ type: str
- playbook_on_play_start_msg:
- description: Output to be used for callback playbook_on_play_start.
- ini:
- - section: callback_diy
- key: playbook_on_play_start_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG
- vars:
- - name: ansible_callback_diy_playbook_on_play_start_msg
- type: str
+ playbook_on_play_start_msg:
+ description: Output to be used for callback playbook_on_play_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_play_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_play_start_msg
+ type: str
- playbook_on_play_start_msg_color:
- description:
- - Output color to be used for I(playbook_on_play_start_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: playbook_on_play_start_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR
- vars:
- - name: ansible_callback_diy_playbook_on_play_start_msg_color
- type: str
+ playbook_on_play_start_msg_color:
+ description:
+ - Output color to be used for O(playbook_on_play_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_play_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_play_start_msg_color
+ type: str
- playbook_on_stats_msg:
- description: Output to be used for callback playbook_on_stats.
- ini:
- - section: callback_diy
- key: playbook_on_stats_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG
- vars:
- - name: ansible_callback_diy_playbook_on_stats_msg
- type: str
+ playbook_on_stats_msg:
+ description: Output to be used for callback playbook_on_stats.
+ ini:
+ - section: callback_diy
+ key: playbook_on_stats_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_stats_msg
+ type: str
- playbook_on_stats_msg_color:
- description:
- - Output color to be used for I(playbook_on_stats_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: playbook_on_stats_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR
- vars:
- - name: ansible_callback_diy_playbook_on_stats_msg_color
- type: str
+ playbook_on_stats_msg_color:
+ description:
+ - Output color to be used for O(playbook_on_stats_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_stats_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_stats_msg_color
+ type: str
- on_file_diff_msg:
- description: Output to be used for callback on_file_diff.
- ini:
- - section: callback_diy
- key: on_file_diff_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG
- vars:
- - name: ansible_callback_diy_on_file_diff_msg
- type: str
+ on_file_diff_msg:
+ description: Output to be used for callback on_file_diff.
+ ini:
+ - section: callback_diy
+ key: on_file_diff_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG
+ vars:
+ - name: ansible_callback_diy_on_file_diff_msg
+ type: str
- on_file_diff_msg_color:
- description:
- - Output color to be used for I(on_file_diff_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: on_file_diff_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR
- vars:
- - name: ansible_callback_diy_on_file_diff_msg_color
- type: str
+ on_file_diff_msg_color:
+ description:
+ - Output color to be used for O(on_file_diff_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: on_file_diff_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_on_file_diff_msg_color
+ type: str
- playbook_on_include_msg:
- description: Output to be used for callback playbook_on_include.
- ini:
- - section: callback_diy
- key: playbook_on_include_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG
- vars:
- - name: ansible_callback_diy_playbook_on_include_msg
- type: str
+ playbook_on_include_msg:
+ description: Output to be used for callback playbook_on_include.
+ ini:
+ - section: callback_diy
+ key: playbook_on_include_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_include_msg
+ type: str
- playbook_on_include_msg_color:
- description:
- - Output color to be used for I(playbook_on_include_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: playbook_on_include_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR
- vars:
- - name: ansible_callback_diy_playbook_on_include_msg_color
- type: str
+ playbook_on_include_msg_color:
+ description:
+ - Output color to be used for O(playbook_on_include_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_include_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_include_msg_color
+ type: str
- runner_item_on_ok_msg:
- description: Output to be used for callback runner_item_on_ok.
- ini:
- - section: callback_diy
- key: runner_item_on_ok_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG
- vars:
- - name: ansible_callback_diy_runner_item_on_ok_msg
- type: str
+ runner_item_on_ok_msg:
+ description: Output to be used for callback runner_item_on_ok.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_ok_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_ok_msg
+ type: str
- runner_item_on_ok_msg_color:
- description:
- - Output color to be used for I(runner_item_on_ok_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: runner_item_on_ok_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR
- vars:
- - name: ansible_callback_diy_runner_item_on_ok_msg_color
- type: str
+ runner_item_on_ok_msg_color:
+ description:
+ - Output color to be used for O(runner_item_on_ok_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_ok_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_ok_msg_color
+ type: str
- runner_item_on_failed_msg:
- description: Output to be used for callback runner_item_on_failed.
- ini:
- - section: callback_diy
- key: runner_item_on_failed_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG
- vars:
- - name: ansible_callback_diy_runner_item_on_failed_msg
- type: str
+ runner_item_on_failed_msg:
+ description: Output to be used for callback runner_item_on_failed.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_failed_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_failed_msg
+ type: str
- runner_item_on_failed_msg_color:
- description:
- - Output color to be used for I(runner_item_on_failed_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: runner_item_on_failed_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR
- vars:
- - name: ansible_callback_diy_runner_item_on_failed_msg_color
- type: str
+ runner_item_on_failed_msg_color:
+ description:
+ - Output color to be used for O(runner_item_on_failed_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_failed_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_failed_msg_color
+ type: str
- runner_item_on_skipped_msg:
- description: Output to be used for callback runner_item_on_skipped.
- ini:
- - section: callback_diy
- key: runner_item_on_skipped_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG
- vars:
- - name: ansible_callback_diy_runner_item_on_skipped_msg
- type: str
+ runner_item_on_skipped_msg:
+ description: Output to be used for callback runner_item_on_skipped.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_skipped_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_skipped_msg
+ type: str
- runner_item_on_skipped_msg_color:
- description:
- - Output color to be used for I(runner_item_on_skipped_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: runner_item_on_skipped_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR
- vars:
- - name: ansible_callback_diy_runner_item_on_skipped_msg_color
- type: str
+ runner_item_on_skipped_msg_color:
+ description:
+ - Output color to be used for O(runner_item_on_skipped_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_skipped_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_skipped_msg_color
+ type: str
- runner_retry_msg:
- description: Output to be used for callback runner_retry.
- ini:
- - section: callback_diy
- key: runner_retry_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG
- vars:
- - name: ansible_callback_diy_runner_retry_msg
- type: str
+ runner_retry_msg:
+ description: Output to be used for callback runner_retry.
+ ini:
+ - section: callback_diy
+ key: runner_retry_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG
+ vars:
+ - name: ansible_callback_diy_runner_retry_msg
+ type: str
- runner_retry_msg_color:
- description:
- - Output color to be used for I(runner_retry_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: runner_retry_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR
- vars:
- - name: ansible_callback_diy_runner_retry_msg_color
- type: str
+ runner_retry_msg_color:
+ description:
+ - Output color to be used for O(runner_retry_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_retry_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_retry_msg_color
+ type: str
- runner_on_start_msg:
- description: Output to be used for callback runner_on_start.
- ini:
- - section: callback_diy
- key: runner_on_start_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG
- vars:
- - name: ansible_callback_diy_runner_on_start_msg
- type: str
+ runner_on_start_msg:
+ description: Output to be used for callback runner_on_start.
+ ini:
+ - section: callback_diy
+ key: runner_on_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_start_msg
+ type: str
- runner_on_start_msg_color:
- description:
- - Output color to be used for I(runner_on_start_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: runner_on_start_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR
- vars:
- - name: ansible_callback_diy_runner_on_start_msg_color
- type: str
+ runner_on_start_msg_color:
+ description:
+ - Output color to be used for O(runner_on_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_start_msg_color
+ type: str
- runner_on_no_hosts_msg:
- description: Output to be used for callback runner_on_no_hosts.
- ini:
- - section: callback_diy
- key: runner_on_no_hosts_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG
- vars:
- - name: ansible_callback_diy_runner_on_no_hosts_msg
- type: str
+ runner_on_no_hosts_msg:
+ description: Output to be used for callback runner_on_no_hosts.
+ ini:
+ - section: callback_diy
+ key: runner_on_no_hosts_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_no_hosts_msg
+ type: str
- runner_on_no_hosts_msg_color:
- description:
- - Output color to be used for I(runner_on_no_hosts_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: runner_on_no_hosts_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR
- vars:
- - name: ansible_callback_diy_runner_on_no_hosts_msg_color
- type: str
+ runner_on_no_hosts_msg_color:
+ description:
+ - Output color to be used for O(runner_on_no_hosts_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_no_hosts_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_no_hosts_msg_color
+ type: str
- playbook_on_setup_msg:
- description: Output to be used for callback playbook_on_setup.
- ini:
- - section: callback_diy
- key: playbook_on_setup_msg
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG
- vars:
- - name: ansible_callback_diy_playbook_on_setup_msg
- type: str
+ playbook_on_setup_msg:
+ description: Output to be used for callback playbook_on_setup.
+ ini:
+ - section: callback_diy
+ key: playbook_on_setup_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_setup_msg
+ type: str
- playbook_on_setup_msg_color:
- description:
- - Output color to be used for I(playbook_on_setup_msg).
- - Template should render a L(valid color value,#notes).
- ini:
- - section: callback_diy
- key: playbook_on_setup_msg_color
- env:
- - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR
- vars:
- - name: ansible_callback_diy_playbook_on_setup_msg_color
- type: str
-'''
+ playbook_on_setup_msg_color:
+ description:
+ - Output color to be used for O(playbook_on_setup_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_setup_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_setup_msg_color
+ type: str
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
ansible.cfg: >
# Enable plugin
[defaults]
@@ -623,11 +616,11 @@ ansible.cfg: >
# Newline after every callback
# on_any_msg='{{ " " | join("\n") }}'
-playbook.yml: >
+playbook.yml: >-
---
- name: "Default plugin output: play example"
hosts: localhost
- gather_facts: no
+ gather_facts: false
tasks:
- name: Default plugin output
ansible.builtin.debug:
@@ -635,7 +628,7 @@ playbook.yml: >
- name: Override from play vars
hosts: localhost
- gather_facts: no
+ gather_facts: false
vars:
ansible_connection: local
green: "\e[0m\e[38;5;82m"
@@ -713,7 +706,7 @@ playbook.yml: >
- name: Using alias vars (see ansible.cfg)
ansible.builtin.debug:
msg:
- when: False
+ when: false
vars:
ansible_callback_diy_playbook_on_task_start_msg: ""
on_skipped_msg: "DIY output(via task vars): skipped example:\n\e[0m\e[38;5;4m\u25b6\u25b6 {{ ansible_callback_diy.result.task.name }}\n"
@@ -782,19 +775,21 @@ playbook.yml: >
{{ white }}{{ ansible_callback_diy[key] }}
{% endfor %}
-'''
+"""
import sys
from contextlib import contextmanager
-from ansible import constants as C
-from ansible.playbook.task_include import TaskInclude
-from ansible.plugins.callback import CallbackBase
-from ansible.utils.color import colorize, hostcolor
from ansible.template import Templar
from ansible.vars.manager import VariableManager
from ansible.plugins.callback.default import CallbackModule as Default
from ansible.module_utils.common.text.converters import to_text
+try:
+ from ansible.template import trust_as_template # noqa: F401, pylint: disable=unused-import
+ SUPPORTS_DATA_TAGGING = True
+except ImportError:
+ SUPPORTS_DATA_TAGGING = False
+
class DummyStdout(object):
def flush(self):
@@ -832,9 +827,9 @@ class CallbackModule(Default):
_callback_options = ['msg', 'msg_color']
for option in _callback_options:
- _option_name = '%s_%s' % (_callback_type, option)
+ _option_name = f'{_callback_type}_{option}'
_option_template = variables.get(
- self.DIY_NS + "_" + _option_name,
+ f"{self.DIY_NS}_{_option_name}",
self.get_option(_option_name)
)
_ret.update({option: self._template(
@@ -848,7 +843,10 @@ class CallbackModule(Default):
return _ret
def _using_diy(self, spec):
- return (spec['msg'] is not None) and (spec['msg'] != spec['vars']['omit'])
+ sentinel = object()
+ omit = spec['vars'].get('omit', sentinel)
+ # With Data Tagging, omit is sentinel
+ return (spec['msg'] is not None) and (spec['msg'] != omit or omit is sentinel)
def _parent_has_callback(self):
return hasattr(super(CallbackModule, self), sys._getframe(1).f_code.co_name)
@@ -871,7 +869,7 @@ class CallbackModule(Default):
handler=None, result=None, stats=None, remove_attr_ref_loop=True):
def _get_value(obj, attr=None, method=None):
if attr:
- return getattr(obj, attr, getattr(obj, "_" + attr, None))
+ return getattr(obj, attr, getattr(obj, f"_{attr}", None))
if method:
_method = getattr(obj, method)
@@ -904,7 +902,7 @@ class CallbackModule(Default):
)
_ret.update(_all)
- _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: CallbackDIYDict()}))
+ _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: {} if SUPPORTS_DATA_TAGGING else CallbackDIYDict()}))
_ret[self.DIY_NS].update({'playbook': {}})
_playbook_attributes = ['entries', 'file_name', 'basedir']
diff --git a/plugins/callback/elastic.py b/plugins/callback/elastic.py
index 37526c155d..82478b9e7d 100644
--- a/plugins/callback/elastic.py
+++ b/plugins/callback/elastic.py
@@ -2,72 +2,71 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Victor Martinez (@v1v)
- name: elastic
- type: notification
- short_description: Create distributed traces for each Ansible task in Elastic APM
- version_added: 3.8.0
+DOCUMENTATION = r"""
+author: Victor Martinez (@v1v)
+name: elastic
+type: notification
+short_description: Create distributed traces for each Ansible task in Elastic APM
+version_added: 3.8.0
+description:
+ - This callback creates distributed traces for each Ansible task in Elastic APM.
+ - You can configure the plugin with environment variables.
+ - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html).
+options:
+ hide_task_arguments:
+ default: false
+ type: bool
description:
- - This callback creates distributed traces for each Ansible task in Elastic APM.
- - You can configure the plugin with environment variables.
- - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html).
- options:
- hide_task_arguments:
- default: false
- type: bool
- description:
- - Hide the arguments for a task.
- env:
- - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
- apm_service_name:
- default: ansible
- type: str
- description:
- - The service name resource attribute.
- env:
- - name: ELASTIC_APM_SERVICE_NAME
- apm_server_url:
- type: str
- description:
- - Use the APM server and its environment variables.
- env:
- - name: ELASTIC_APM_SERVER_URL
- apm_secret_token:
- type: str
- description:
- - Use the APM server token
- env:
- - name: ELASTIC_APM_SECRET_TOKEN
- apm_api_key:
- type: str
- description:
- - Use the APM API key
- env:
- - name: ELASTIC_APM_API_KEY
- apm_verify_server_cert:
- default: true
- type: bool
- description:
- - Verifies the SSL certificate if an HTTPS connection.
- env:
- - name: ELASTIC_APM_VERIFY_SERVER_CERT
- traceparent:
- type: str
- description:
- - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
- env:
- - name: TRACEPARENT
- requirements:
- - elastic-apm (Python library)
-'''
+ - Hide the arguments for a task.
+ env:
+ - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
+ apm_service_name:
+ default: ansible
+ type: str
+ description:
+ - The service name resource attribute.
+ env:
+ - name: ELASTIC_APM_SERVICE_NAME
+ apm_server_url:
+ type: str
+ description:
+ - Use the APM server and its environment variables.
+ env:
+ - name: ELASTIC_APM_SERVER_URL
+ apm_secret_token:
+ type: str
+ description:
+ - Use the APM server token.
+ env:
+ - name: ELASTIC_APM_SECRET_TOKEN
+ apm_api_key:
+ type: str
+ description:
+ - Use the APM API key.
+ env:
+ - name: ELASTIC_APM_API_KEY
+ apm_verify_server_cert:
+ default: true
+ type: bool
+ description:
+ - Verifies the SSL certificate if an HTTPS connection.
+ env:
+ - name: ELASTIC_APM_VERIFY_SERVER_CERT
+ traceparent:
+ type: str
+ description:
+ - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
+ env:
+ - name: TRACEPARENT
+requirements:
+ - elastic-apm (Python library)
+"""
-EXAMPLES = '''
-examples: |
+EXAMPLES = r"""
+examples: |-
Enable the plugin in ansible.cfg:
[defaults]
callbacks_enabled = community.general.elastic
@@ -76,7 +75,7 @@ examples: |
export ELASTIC_APM_SERVER_URL=
export ELASTIC_APM_SERVICE_NAME=your_service_name
export ELASTIC_APM_API_KEY=your_APM_API_KEY
-'''
+"""
import getpass
import socket
@@ -84,10 +83,11 @@ import time
import uuid
from collections import OrderedDict
+from contextlib import closing
from os.path import basename
from ansible.errors import AnsibleError, AnsibleRuntimeError
-from ansible.module_utils.six import raise_from
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.plugins.callback import CallbackBase
try:
@@ -117,7 +117,7 @@ class TaskData:
if host.uuid in self.host_data:
if host.status == 'included':
# concatenate task include output from multiple items
- host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
+ host.result = f'{self.host_data[host.uuid].result}\n{host.result}'
else:
return
@@ -140,7 +140,6 @@ class HostData:
class ElasticSource(object):
def __init__(self, display):
self.ansible_playbook = ""
- self.ansible_version = None
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
try:
@@ -165,7 +164,7 @@ class ElasticSource(object):
args = None
if not task.no_log and not hide_task_arguments:
- args = ', '.join(('%s=%s' % a for a in task.args.items()))
+ args = ', '.join((f'{k}={v}' for k, v in task.args.items()))
tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
@@ -183,9 +182,6 @@ class ElasticSource(object):
task = tasks_data[task_uuid]
- if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = result._task_fields['args'].get('_ansible_version')
-
task.add_host(HostData(host_uuid, host_name, status, result))
def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name,
@@ -201,29 +197,29 @@ class ElasticSource(object):
apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key)
if apm_cli:
- instrument() # Only call this once, as early as possible.
- if traceparent:
- parent = trace_parent_from_string(traceparent)
- apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time)
- else:
- apm_cli.begin_transaction("Session", start=parent_start_time)
- # Populate trace metadata attributes
- if self.ansible_version is not None:
- label(ansible_version=self.ansible_version)
- label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
- if self.ip_address is not None:
- label(ansible_host_ip=self.ip_address)
+ with closing(apm_cli):
+ instrument() # Only call this once, as early as possible.
+ if traceparent:
+ parent = trace_parent_from_string(traceparent)
+ apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time)
+ else:
+ apm_cli.begin_transaction("Session", start=parent_start_time)
+ # Populate trace metadata attributes
+ label(ansible_version=ansible_version)
+ label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
+ if self.ip_address is not None:
+ label(ansible_host_ip=self.ip_address)
- for task_data in tasks:
- for host_uuid, host_data in task_data.host_data.items():
- self.create_span_data(apm_cli, task_data, host_data)
+ for task_data in tasks:
+ for host_uuid, host_data in task_data.host_data.items():
+ self.create_span_data(apm_cli, task_data, host_data)
- apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time)
+ apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time)
def create_span_data(self, apm_cli, task_data, host_data):
""" create the span with the given TaskData and HostData """
- name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
+ name = f'[{host_data.name}] {task_data.play}: {task_data.name}'
message = "success"
status = "success"
@@ -257,7 +253,7 @@ class ElasticSource(object):
"ansible.task.host.status": host_data.status}) as span:
span.outcome = status
if 'failure' in status:
- exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, enriched_error_message))
+ exception = AnsibleRuntimeError(message=f"{task_data.action}: {name} failed with error message {enriched_error_message}")
apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True)
def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key):
@@ -286,7 +282,7 @@ class ElasticSource(object):
message = result.get('msg', 'failed')
exception = result.get('exception')
stderr = result.get('stderr')
- return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
+ return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\""
class CallbackModule(CallbackBase):
@@ -311,9 +307,7 @@ class CallbackModule(CallbackBase):
self.disabled = False
if ELASTIC_LIBRARY_IMPORT_ERROR:
- raise_from(
- AnsibleError('The `elastic-apm` must be installed to use this plugin'),
- ELASTIC_LIBRARY_IMPORT_ERROR)
+ raise AnsibleError('The `elastic-apm` must be installed to use this plugin') from ELASTIC_LIBRARY_IMPORT_ERROR
self.tasks_data = OrderedDict()
diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py
deleted file mode 100644
index dc70789dbe..0000000000
--- a/plugins/callback/hipchat.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2014, Matt Martz
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: hipchat
- type: notification
- requirements:
- - whitelist in configuration.
- - prettytable (python lib)
- short_description: post task events to hipchat
- description:
- - This callback plugin sends status updates to a HipChat channel during playbook execution.
- - Before 2.4 only environment variables were available for configuring this plugin.
- options:
- token:
- description: HipChat API token for v1 or v2 API.
- required: True
- env:
- - name: HIPCHAT_TOKEN
- ini:
- - section: callback_hipchat
- key: token
- api_version:
- description: HipChat API version, v1 or v2.
- required: False
- default: v1
- env:
- - name: HIPCHAT_API_VERSION
- ini:
- - section: callback_hipchat
- key: api_version
- room:
- description: HipChat room to post in.
- default: ansible
- env:
- - name: HIPCHAT_ROOM
- ini:
- - section: callback_hipchat
- key: room
- from:
- description: Name to post as
- default: ansible
- env:
- - name: HIPCHAT_FROM
- ini:
- - section: callback_hipchat
- key: from
- notify:
- description: Add notify flag to important messages
- type: bool
- default: True
- env:
- - name: HIPCHAT_NOTIFY
- ini:
- - section: callback_hipchat
- key: notify
-
-'''
-
-import os
-import json
-
-try:
- import prettytable
- HAS_PRETTYTABLE = True
-except ImportError:
- HAS_PRETTYTABLE = False
-
-from ansible.plugins.callback import CallbackBase
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.urls import open_url
-
-
-class CallbackModule(CallbackBase):
- """This is an example ansible callback plugin that sends status
- updates to a HipChat channel during playbook execution.
- """
-
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'notification'
- CALLBACK_NAME = 'community.general.hipchat'
- CALLBACK_NEEDS_WHITELIST = True
-
- API_V1_URL = 'https://api.hipchat.com/v1/rooms/message'
- API_V2_URL = 'https://api.hipchat.com/v2/'
-
- def __init__(self):
-
- super(CallbackModule, self).__init__()
-
- if not HAS_PRETTYTABLE:
- self.disabled = True
- self._display.warning('The `prettytable` python module is not installed. '
- 'Disabling the HipChat callback plugin.')
- self.printed_playbook = False
- self.playbook_name = None
- self.play = None
-
- def set_options(self, task_keys=None, var_options=None, direct=None):
- super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
-
- self.token = self.get_option('token')
- self.api_version = self.get_option('api_version')
- self.from_name = self.get_option('from')
- self.allow_notify = self.get_option('notify')
- self.room = self.get_option('room')
-
- if self.token is None:
- self.disabled = True
- self._display.warning('HipChat token could not be loaded. The HipChat '
- 'token can be provided using the `HIPCHAT_TOKEN` '
- 'environment variable.')
-
- # Pick the request handler.
- if self.api_version == 'v2':
- self.send_msg = self.send_msg_v2
- else:
- self.send_msg = self.send_msg_v1
-
- def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False):
- """Method for sending a message to HipChat"""
-
- headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'}
-
- body = {}
- body['room_id'] = self.room
- body['from'] = self.from_name[:15] # max length is 15
- body['message'] = msg
- body['message_format'] = msg_format
- body['color'] = color
- body['notify'] = self.allow_notify and notify
-
- data = json.dumps(body)
- url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room)
- try:
- response = open_url(url, data=data, headers=headers, method='POST')
- return response.read()
- except Exception as ex:
- self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
-
- def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False):
- """Method for sending a message to HipChat"""
-
- params = {}
- params['room_id'] = self.room
- params['from'] = self.from_name[:15] # max length is 15
- params['message'] = msg
- params['message_format'] = msg_format
- params['color'] = color
- params['notify'] = int(self.allow_notify and notify)
-
- url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token))
- try:
- response = open_url(url, data=urlencode(params))
- return response.read()
- except Exception as ex:
- self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
-
- def v2_playbook_on_play_start(self, play):
- """Display Playbook and play start messages"""
-
- self.play = play
- name = play.name
- # This block sends information about a playbook when it starts
- # The playbook object is not immediately available at
- # playbook_on_start so we grab it via the play
- #
- # Displays info about playbook being started by a person on an
- # inventory, as well as Tags, Skip Tags and Limits
- if not self.printed_playbook:
- self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename))
- host_list = self.play.playbook.inventory.host_list
- inventory = os.path.basename(os.path.realpath(host_list))
- self.send_msg("%s: Playbook initiated by %s against %s" %
- (self.playbook_name,
- self.play.playbook.remote_user,
- inventory), notify=True)
- self.printed_playbook = True
- subset = self.play.playbook.inventory._subset
- skip_tags = self.play.playbook.skip_tags
- self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" %
- (self.playbook_name,
- ', '.join(self.play.playbook.only_tags),
- ', '.join(skip_tags) if skip_tags else None,
- ', '.join(subset) if subset else subset))
-
- # This is where we actually say we are starting a play
- self.send_msg("%s: Starting play: %s" %
- (self.playbook_name, name))
-
- def playbook_on_stats(self, stats):
- """Display info about playbook statistics"""
- hosts = sorted(stats.processed.keys())
-
- t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
- 'Failures'])
-
- failures = False
- unreachable = False
-
- for h in hosts:
- s = stats.summarize(h)
-
- if s['failures'] > 0:
- failures = True
- if s['unreachable'] > 0:
- unreachable = True
-
- t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
- 'failures']])
-
- self.send_msg("%s: Playbook complete" % self.playbook_name,
- notify=True)
-
- if failures or unreachable:
- color = 'red'
- self.send_msg("%s: Failures detected" % self.playbook_name,
- color=color, notify=True)
- else:
- color = 'green'
-
- self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color)
diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py
index 3fd0b6fb97..319611d460 100644
--- a/plugins/callback/jabber.py
+++ b/plugins/callback/jabber.py
@@ -1,44 +1,46 @@
-# -*- coding: utf-8 -*-
# Copyright (C) 2016 maxn nikolaev.makc@gmail.com
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: jabber
- type: notification
- short_description: post task events to a jabber server
- description:
- - The chatty part of ChatOps with a Hipchat server as a target
- - This callback plugin sends status updates to a HipChat channel during playbook execution.
- requirements:
- - xmpp (python lib https://github.com/ArchipelProject/xmpppy)
- options:
- server:
- description: connection info to jabber server
- required: True
- env:
- - name: JABBER_SERV
- user:
- description: Jabber user to authenticate as
- required: True
- env:
- - name: JABBER_USER
- password:
- description: Password for the user to the jabber server
- required: True
- env:
- - name: JABBER_PASS
- to:
- description: chat identifier that will receive the message
- required: True
- env:
- - name: JABBER_TO
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: jabber
+type: notification
+short_description: Post task events to a Jabber server
+description:
+ - The chatty part of ChatOps with a Hipchat server as a target.
+ - This callback plugin sends status updates to a HipChat channel during playbook execution.
+requirements:
+ - xmpp (Python library U(https://github.com/ArchipelProject/xmpppy))
+options:
+ server:
+ description: Connection info to Jabber server.
+ type: str
+ required: true
+ env:
+ - name: JABBER_SERV
+ user:
+ description: Jabber user to authenticate as.
+ type: str
+ required: true
+ env:
+ - name: JABBER_USER
+ password:
+ description: Password for the user to the Jabber server.
+ type: str
+ required: true
+ env:
+ - name: JABBER_PASS
+ to:
+ description: Chat identifier that receives the message.
+ type: str
+ required: true
+ env:
+ - name: JABBER_TO
+"""
import os
@@ -98,7 +100,7 @@ class CallbackModule(CallbackBase):
"""Display Playbook and play start messages"""
self.play = play
name = play.name
- self.send_msg("Ansible starting play: %s" % (name))
+ self.send_msg(f"Ansible starting play: {name}")
def playbook_on_stats(self, stats):
name = self.play
@@ -114,7 +116,7 @@ class CallbackModule(CallbackBase):
if failures or unreachable:
out = self.debug
- self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out))
+ self.send_msg(f"{name}: Failures detected \n{self.task} \nHost: {h}\n Failed at:\n{out}")
else:
out = self.debug
- self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out))
+ self.send_msg(f"Great! \n Playbook {name} completed:\n{s} \n Last task debug:\n {out}")
diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py
index b1dc69364c..89ec8cbff3 100644
--- a/plugins/callback/log_plays.py
+++ b/plugins/callback/log_plays.py
@@ -1,32 +1,31 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2012, Michael DeHaan,
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: log_plays
- type: notification
- short_description: write playbook output to log file
- description:
- - This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory
- requirements:
- - Whitelist in configuration
- - A writeable /var/log/ansible/hosts directory by the user executing Ansible on the controller
- options:
- log_folder:
- default: /var/log/ansible/hosts
- description: The folder where log files will be created.
- env:
- - name: ANSIBLE_LOG_FOLDER
- ini:
- - section: callback_log_plays
- key: log_folder
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: log_plays
+type: notification
+short_description: Write playbook output to log file
+description:
+ - This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory.
+requirements:
+ - Whitelist in configuration
+ - A writeable C(/var/log/ansible/hosts) directory by the user executing Ansible on the controller
+options:
+ log_folder:
+ default: /var/log/ansible/hosts
+ description: The folder where log files are created.
+ type: str
+ env:
+ - name: ANSIBLE_LOG_FOLDER
+ ini:
+ - section: callback_log_plays
+ key: log_folder
+"""
import os
import time
@@ -34,7 +33,7 @@ import json
from ansible.utils.path import makedirs_safe
from ansible.module_utils.common.text.converters import to_bytes
-from ansible.module_utils.common._collections_compat import MutableMapping
+from collections.abc import MutableMapping
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
@@ -56,7 +55,10 @@ class CallbackModule(CallbackBase):
CALLBACK_NEEDS_WHITELIST = True
TIME_FORMAT = "%b %d %Y %H:%M:%S"
- MSG_FORMAT = "%(now)s - %(playbook)s - %(task_name)s - %(task_action)s - %(category)s - %(data)s\n\n"
+
+ @staticmethod
+ def _make_msg(now, playbook, task_name, task_action, category, data):
+ return f"{now} - {playbook} - {task_name} - {task_action} - {category} - {data}\n\n"
def __init__(self):
@@ -81,22 +83,12 @@ class CallbackModule(CallbackBase):
invocation = data.pop('invocation', None)
data = json.dumps(data, cls=AnsibleJSONEncoder)
if invocation is not None:
- data = json.dumps(invocation) + " => %s " % data
+ data = f"{json.dumps(invocation)} => {data} "
path = os.path.join(self.log_folder, result._host.get_name())
now = time.strftime(self.TIME_FORMAT, time.localtime())
- msg = to_bytes(
- self.MSG_FORMAT
- % dict(
- now=now,
- playbook=self.playbook,
- task_name=result._task.name,
- task_action=result._task.action,
- category=category,
- data=data,
- )
- )
+ msg = to_bytes(self._make_msg(now, self.playbook, result._task.name, result._task.action, category, data))
with open(path, "ab") as fd:
fd.write(msg)
diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py
index 54acf846a3..05996f2492 100644
--- a/plugins/callback/loganalytics.py
+++ b/plugins/callback/loganalytics.py
@@ -1,44 +1,44 @@
-# -*- coding: utf-8 -*-
# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: loganalytics
- type: aggregate
- short_description: Posts task results to Azure Log Analytics
- author: "Cyrus Li (@zhcli) "
- description:
- - This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace.
- - Credits to authors of splunk callback plugin.
- version_added: "2.4.0"
- requirements:
- - Whitelisting this callback plugin.
- - An Azure log analytics work space has been established.
- options:
- workspace_id:
- description: Workspace ID of the Azure log analytics workspace.
- required: true
- env:
- - name: WORKSPACE_ID
- ini:
- - section: callback_loganalytics
- key: workspace_id
- shared_key:
- description: Shared key to connect to Azure log analytics workspace.
- required: true
- env:
- - name: WORKSPACE_SHARED_KEY
- ini:
- - section: callback_loganalytics
- key: shared_key
-'''
+DOCUMENTATION = r"""
+name: loganalytics
+type: notification
+short_description: Posts task results to Azure Log Analytics
+author: "Cyrus Li (@zhcli) "
+description:
+ - This callback plugin posts task results in JSON formatted to an Azure Log Analytics workspace.
+ - Credits to authors of splunk callback plugin.
+version_added: "2.4.0"
+requirements:
+ - Whitelisting this callback plugin.
+ - An Azure log analytics work space has been established.
+options:
+ workspace_id:
+ description: Workspace ID of the Azure log analytics workspace.
+ type: str
+ required: true
+ env:
+ - name: WORKSPACE_ID
+ ini:
+ - section: callback_loganalytics
+ key: workspace_id
+ shared_key:
+ description: Shared key to connect to Azure log analytics workspace.
+ type: str
+ required: true
+ env:
+ - name: WORKSPACE_SHARED_KEY
+ ini:
+ - section: callback_loganalytics
+ key: shared_key
+"""
-EXAMPLES = '''
-examples: |
+EXAMPLES = r"""
+examples: |-
Whitelist the plugin in ansible.cfg:
[defaults]
callback_whitelist = community.general.loganalytics
@@ -49,30 +49,32 @@ examples: |
[callback_loganalytics]
workspace_id = 01234567-0123-0123-0123-01234567890a
shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
-'''
+"""
import hashlib
import hmac
import base64
-import logging
import json
import uuid
import socket
import getpass
-from datetime import datetime
from os.path import basename
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
+from ansible_collections.community.general.plugins.module_utils.datetime import (
+ now,
+)
+
class AzureLogAnalyticsSource(object):
def __init__(self):
self.ansible_check_mode = False
self.ansible_playbook = ""
- self.ansible_version = ""
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
self.user = getpass.getuser()
@@ -80,30 +82,25 @@ class AzureLogAnalyticsSource(object):
def __build_signature(self, date, workspace_id, shared_key, content_length):
# Build authorisation signature for Azure log analytics API call
- sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format(
- str(content_length), date)
+ sigs = f"POST\n{content_length}\napplication/json\nx-ms-date:{date}\n/api/logs"
utf8_sigs = sigs.encode('utf-8')
decoded_shared_key = base64.b64decode(shared_key)
hmac_sha256_sigs = hmac.new(
decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest()
encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8')
- signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash)
+ signature = f"SharedKey {workspace_id}:{encoded_hash}"
return signature
def __build_workspace_url(self, workspace_id):
- return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id)
+ return f"https://{workspace_id}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01"
def __rfc1123date(self):
- return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
+ return now().strftime('%a, %d %b %Y %H:%M:%S GMT')
def send_event(self, workspace_id, shared_key, state, result, runtime):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
- if result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = \
- result._task_fields['args'].get('_ansible_version')
-
if result._task._role:
ansible_role = str(result._task._role)
else:
@@ -117,7 +114,7 @@ class AzureLogAnalyticsSource(object):
data['host'] = self.host
data['user'] = self.user
data['runtime'] = runtime
- data['ansible_version'] = self.ansible_version
+ data['ansible_version'] = ansible_version
data['ansible_check_mode'] = self.ansible_check_mode
data['ansible_host'] = result._host.name
data['ansible_playbook'] = self.ansible_playbook
@@ -155,7 +152,7 @@ class AzureLogAnalyticsSource(object):
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'aggregate'
+ CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'loganalytics'
CALLBACK_NEEDS_WHITELIST = True
@@ -168,7 +165,7 @@ class CallbackModule(CallbackBase):
def _seconds_since_start(self, result):
return (
- datetime.utcnow() -
+ now() -
self.start_datetimes[result._task._uuid]
).total_seconds()
@@ -186,10 +183,10 @@ class CallbackModule(CallbackBase):
self.loganalytics.ansible_playbook = basename(playbook._file_name)
def v2_playbook_on_task_start(self, task, is_conditional):
- self.start_datetimes[task._uuid] = datetime.utcnow()
+ self.start_datetimes[task._uuid] = now()
def v2_playbook_on_handler_task_start(self, task):
- self.start_datetimes[task._uuid] = datetime.utcnow()
+ self.start_datetimes[task._uuid] = now()
def v2_runner_on_ok(self, result, **kwargs):
self.loganalytics.send_event(
diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py
index c84054c592..09d8b38dcb 100644
--- a/plugins/callback/logdna.py
+++ b/plugins/callback/logdna.py
@@ -1,61 +1,59 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Samir Musali
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: logdna
- type: aggregate
- short_description: Sends playbook logs to LogDNA
- description:
- - This callback will report logs from playbook actions, tasks, and events to LogDNA (https://app.logdna.com)
- requirements:
- - LogDNA Python Library (https://github.com/logdna/python)
- - whitelisting in configuration
- options:
- conf_key:
- required: True
- description: LogDNA Ingestion Key
- type: string
- env:
- - name: LOGDNA_INGESTION_KEY
- ini:
- - section: callback_logdna
- key: conf_key
- plugin_ignore_errors:
- required: False
- description: Whether to ignore errors on failing or not
- type: boolean
- env:
- - name: ANSIBLE_IGNORE_ERRORS
- ini:
- - section: callback_logdna
- key: plugin_ignore_errors
- default: False
- conf_hostname:
- required: False
- description: Alternative Host Name; the current host name by default
- type: string
- env:
- - name: LOGDNA_HOSTNAME
- ini:
- - section: callback_logdna
- key: conf_hostname
- conf_tags:
- required: False
- description: Tags
- type: string
- env:
- - name: LOGDNA_TAGS
- ini:
- - section: callback_logdna
- key: conf_tags
- default: ansible
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: logdna
+type: notification
+short_description: Sends playbook logs to LogDNA
+description:
+ - This callback reports logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)).
+requirements:
+ - LogDNA Python Library (U(https://github.com/logdna/python))
+ - whitelisting in configuration
+options:
+ conf_key:
+ required: true
+ description: LogDNA Ingestion Key.
+ type: string
+ env:
+ - name: LOGDNA_INGESTION_KEY
+ ini:
+ - section: callback_logdna
+ key: conf_key
+ plugin_ignore_errors:
+ required: false
+ description: Whether to ignore errors on failing or not.
+ type: boolean
+ env:
+ - name: ANSIBLE_IGNORE_ERRORS
+ ini:
+ - section: callback_logdna
+ key: plugin_ignore_errors
+ default: false
+ conf_hostname:
+ required: false
+ description: Alternative Host Name; the current host name by default.
+ type: string
+ env:
+ - name: LOGDNA_HOSTNAME
+ ini:
+ - section: callback_logdna
+ key: conf_hostname
+ conf_tags:
+ required: false
+ description: Tags.
+ type: string
+ env:
+ - name: LOGDNA_TAGS
+ ini:
+ - section: callback_logdna
+ key: conf_tags
+ default: ansible
+"""
import logging
import json
@@ -73,7 +71,7 @@ except ImportError:
# Getting MAC Address of system:
def get_mac():
- mac = "%012x" % getnode()
+ mac = f"{getnode():012x}"
return ":".join(map(lambda index: mac[index:index + 2], range(int(len(mac) / 2))))
@@ -111,7 +109,7 @@ def isJSONable(obj):
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 0.1
- CALLBACK_TYPE = 'aggregate'
+ CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'community.general.logdna'
CALLBACK_NEEDS_WHITELIST = True
@@ -161,7 +159,7 @@ class CallbackModule(CallbackBase):
if ninvalidKeys > 0:
for key in invalidKeys:
del meta[key]
- meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(invalidKeys)
+ meta['__errors'] = f"These keys have been sanitized: {', '.join(invalidKeys)}"
return meta
def sanitizeJSON(self, data):
diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py
index 945757edd6..8fbcef4dd6 100644
--- a/plugins/callback/logentries.py
+++ b/plugins/callback/logentries.py
@@ -1,80 +1,80 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2015, Logentries.com, Jimmy Tang
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: logentries
- type: notification
- short_description: Sends events to Logentries
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: logentries
+type: notification
+short_description: Sends events to Logentries
+description:
+ - This callback plugin generates JSON objects and send them to Logentries using TCP for auditing/debugging purposes.
+requirements:
+ - whitelisting in configuration
+ - certifi (Python library)
+ - flatdict (Python library), if you want to use the O(flatten) option
+options:
+ api:
+ description: URI to the Logentries API.
+ type: str
+ env:
+ - name: LOGENTRIES_API
+ default: data.logentries.com
+ ini:
+ - section: callback_logentries
+ key: api
+ port:
+ description: HTTP port to use when connecting to the API.
+ type: int
+ env:
+ - name: LOGENTRIES_PORT
+ default: 80
+ ini:
+ - section: callback_logentries
+ key: port
+ tls_port:
+ description: Port to use when connecting to the API when TLS is enabled.
+ type: int
+ env:
+ - name: LOGENTRIES_TLS_PORT
+ default: 443
+ ini:
+ - section: callback_logentries
+ key: tls_port
+ token:
+ description: The logentries C(TCP token).
+ type: str
+ env:
+ - name: LOGENTRIES_ANSIBLE_TOKEN
+ required: true
+ ini:
+ - section: callback_logentries
+ key: token
+ use_tls:
description:
- - This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes.
- - Before 2.4, if you wanted to use an ini configuration, the file must be placed in the same directory as this plugin and named logentries.ini
- - In 2.4 and above you can just put it in the main Ansible configuration file.
- requirements:
- - whitelisting in configuration
- - certifi (python library)
- - flatdict (python library), if you want to use the 'flatten' option
- options:
- api:
- description: URI to the Logentries API
- env:
- - name: LOGENTRIES_API
- default: data.logentries.com
- ini:
- - section: callback_logentries
- key: api
- port:
- description: HTTP port to use when connecting to the API
- env:
- - name: LOGENTRIES_PORT
- default: 80
- ini:
- - section: callback_logentries
- key: port
- tls_port:
- description: Port to use when connecting to the API when TLS is enabled
- env:
- - name: LOGENTRIES_TLS_PORT
- default: 443
- ini:
- - section: callback_logentries
- key: tls_port
- token:
- description: The logentries "TCP token"
- env:
- - name: LOGENTRIES_ANSIBLE_TOKEN
- required: True
- ini:
- - section: callback_logentries
- key: token
- use_tls:
- description:
- - Toggle to decide whether to use TLS to encrypt the communications with the API server
- env:
- - name: LOGENTRIES_USE_TLS
- default: False
- type: boolean
- ini:
- - section: callback_logentries
- key: use_tls
- flatten:
- description: flatten complex data structures into a single dictionary with complex keys
- type: boolean
- default: False
- env:
- - name: LOGENTRIES_FLATTEN
- ini:
- - section: callback_logentries
- key: flatten
-'''
+ - Toggle to decide whether to use TLS to encrypt the communications with the API server.
+ env:
+ - name: LOGENTRIES_USE_TLS
+ default: false
+ type: boolean
+ ini:
+ - section: callback_logentries
+ key: use_tls
+ flatten:
+ description: Flatten complex data structures into a single dictionary with complex keys.
+ type: boolean
+ default: false
+ env:
+ - name: LOGENTRIES_FLATTEN
+ ini:
+ - section: callback_logentries
+ key: flatten
+"""
-EXAMPLES = '''
-examples: >
+EXAMPLES = r"""
+examples: >-
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
@@ -90,10 +90,10 @@ examples: >
api = data.logentries.com
port = 10000
tls_port = 20000
- use_tls = no
+ use_tls = true
token = dd21fc88-f00a-43ff-b977-e3a4233c53af
- flatten = False
-'''
+ flatten = false
+"""
import os
import socket
@@ -131,7 +131,7 @@ class PlainTextSocketAppender(object):
# Error message displayed when an incorrect Token has been detected
self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_TOKEN parameter you entered is incorrect!\n\n"
# Unicode Line separator character \u2028
- self.LINE_SEP = u'\u2028'
+ self.LINE_SEP = '\u2028'
self._display = display
self._conn = None
@@ -149,7 +149,7 @@ class PlainTextSocketAppender(object):
self.open_connection()
return
except Exception as e:
- self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e))
+ self._display.vvvv(f"Unable to connect to Logentries: {e}")
root_delay *= 2
if root_delay > self.MAX_DELAY:
@@ -158,7 +158,7 @@ class PlainTextSocketAppender(object):
wait_for = root_delay + random.uniform(0, root_delay)
try:
- self._display.vvvv("sleeping %s before retry" % wait_for)
+ self._display.vvvv(f"sleeping {wait_for} before retry")
time.sleep(wait_for)
except KeyboardInterrupt:
raise
@@ -171,8 +171,8 @@ class PlainTextSocketAppender(object):
# Replace newlines with Unicode line separator
# for multi-line events
data = to_text(data, errors='surrogate_or_strict')
- multiline = data.replace(u'\n', self.LINE_SEP)
- multiline += u"\n"
+ multiline = data.replace('\n', self.LINE_SEP)
+ multiline += "\n"
# Send data, reconnect if needed
while True:
try:
@@ -196,15 +196,11 @@ else:
class TLSSocketAppender(PlainTextSocketAppender):
def open_connection(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock = ssl.wrap_socket(
+ context = ssl.create_default_context(
+ purpose=ssl.Purpose.SERVER_AUTH,
+ cafile=certifi.where(), )
+ sock = context.wrap_socket(
sock=sock,
- keyfile=None,
- certfile=None,
- server_side=False,
- cert_reqs=ssl.CERT_REQUIRED,
- ssl_version=getattr(
- ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1),
- ca_certs=certifi.where(),
do_handshake_on_connect=True,
suppress_ragged_eofs=True, )
sock.connect((self.LE_API, self.LE_TLS_PORT))
@@ -249,7 +245,7 @@ class CallbackModule(CallbackBase):
self.use_tls = self.get_option('use_tls')
self.flatten = self.get_option('flatten')
except KeyError as e:
- self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e))
+ self._display.warning(f"Missing option for Logentries callback plugin: {e}")
self.disabled = True
try:
@@ -268,10 +264,10 @@ class CallbackModule(CallbackBase):
if not self.disabled:
if self.use_tls:
- self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port))
+ self._display.vvvv(f"Connecting to {self.api_url}:{self.api_tls_port} with TLS")
self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port)
else:
- self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port))
+ self._display.vvvv(f"Connecting to {self.api_url}:{self.api_port}")
self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port)
self._appender.reopen_connection()
@@ -284,7 +280,7 @@ class CallbackModule(CallbackBase):
def emit(self, record):
msg = record.rstrip('\n')
- msg = "{0} {1}".format(self.token, msg)
+ msg = f"{self.token} {msg}"
self._appender.put(msg)
self._display.vvvv("Sent event to logentries")
diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py
index 5d3c1e50b8..f2279929f0 100644
--- a/plugins/callback/logstash.py
+++ b/plugins/callback/logstash.py
@@ -1,97 +1,98 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2020, Yevhen Khmelenko
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = r'''
- author: Yevhen Khmelenko (@ujenmr)
- name: logstash
- type: notification
- short_description: Sends events to Logstash
- description:
- - This callback will report facts and task events to Logstash https://www.elastic.co/products/logstash
- requirements:
- - whitelisting in configuration
- - logstash (python library)
- options:
- server:
- description: Address of the Logstash server
- env:
- - name: LOGSTASH_SERVER
- ini:
- - section: callback_logstash
- key: server
- version_added: 1.0.0
- default: localhost
- port:
- description: Port on which logstash is listening
- env:
- - name: LOGSTASH_PORT
- ini:
- - section: callback_logstash
- key: port
- version_added: 1.0.0
- default: 5000
- type:
- description: Message type
- env:
- - name: LOGSTASH_TYPE
- ini:
- - section: callback_logstash
- key: type
- version_added: 1.0.0
- default: ansible
- pre_command:
- description: Executes command before run and its result is added to the C(ansible_pre_command_output) logstash field.
- version_added: 2.0.0
- ini:
- - section: callback_logstash
- key: pre_command
- env:
- - name: LOGSTASH_PRE_COMMAND
- format_version:
- description: Logging format
- type: str
- version_added: 2.0.0
- ini:
- - section: callback_logstash
- key: format_version
- env:
- - name: LOGSTASH_FORMAT_VERSION
- default: v1
- choices:
- - v1
- - v2
+DOCUMENTATION = r"""
+author: Yevhen Khmelenko (@ujenmr)
+name: logstash
+type: notification
+short_description: Sends events to Logstash
+description:
+ - This callback reports facts and task events to Logstash U(https://www.elastic.co/products/logstash).
+requirements:
+ - whitelisting in configuration
+ - logstash (Python library)
+options:
+ server:
+ description: Address of the Logstash server.
+ type: str
+ env:
+ - name: LOGSTASH_SERVER
+ ini:
+ - section: callback_logstash
+ key: server
+ version_added: 1.0.0
+ default: localhost
+ port:
+ description: Port on which logstash is listening.
+ type: int
+ env:
+ - name: LOGSTASH_PORT
+ ini:
+ - section: callback_logstash
+ key: port
+ version_added: 1.0.0
+ default: 5000
+ type:
+ description: Message type.
+ type: str
+ env:
+ - name: LOGSTASH_TYPE
+ ini:
+ - section: callback_logstash
+ key: type
+ version_added: 1.0.0
+ default: ansible
+ pre_command:
+ description: Executes command before run and its result is added to the C(ansible_pre_command_output) logstash field.
+ type: str
+ version_added: 2.0.0
+ ini:
+ - section: callback_logstash
+ key: pre_command
+ env:
+ - name: LOGSTASH_PRE_COMMAND
+ format_version:
+ description: Logging format.
+ type: str
+ version_added: 2.0.0
+ ini:
+ - section: callback_logstash
+ key: format_version
+ env:
+ - name: LOGSTASH_FORMAT_VERSION
+ default: v1
+ choices:
+ - v1
+ - v2
+"""
-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
ansible.cfg: |
- # Enable Callback plugin
- [defaults]
- callback_whitelist = community.general.logstash
+ # Enable Callback plugin
+ [defaults]
+ callback_whitelist = community.general.logstash
- [callback_logstash]
- server = logstash.example.com
- port = 5000
- pre_command = git rev-parse HEAD
- type = ansible
+ [callback_logstash]
+ server = logstash.example.com
+ port = 5000
+ pre_command = git rev-parse HEAD
+ type = ansible
-11-input-tcp.conf: |
- # Enable Logstash TCP Input
- input {
- tcp {
- port => 5000
- codec => json
- add_field => { "[@metadata][beat]" => "notify" }
- add_field => { "[@metadata][type]" => "ansible" }
- }
- }
-'''
+11-input-tcp.conf: |-
+ # Enable Logstash TCP Input
+ input {
+ tcp {
+ port => 5000
+ codec => json
+ add_field => { "[@metadata][beat]" => "notify" }
+ add_field => { "[@metadata][type]" => "ansible" }
+ }
+ }
+"""
import os
import json
@@ -99,7 +100,6 @@ from ansible import context
import socket
import uuid
import logging
-from datetime import datetime
try:
import logstash
@@ -109,11 +109,15 @@ except ImportError:
from ansible.plugins.callback import CallbackBase
+from ansible_collections.community.general.plugins.module_utils.datetime import (
+ now,
+)
+
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'aggregate'
+ CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'community.general.logstash'
CALLBACK_NEEDS_WHITELIST = True
@@ -122,11 +126,9 @@ class CallbackModule(CallbackBase):
if not HAS_LOGSTASH:
self.disabled = True
- self._display.warning("The required python-logstash/python3-logstash is not installed. "
- "pip install python-logstash for Python 2"
- "pip install python3-logstash for Python 3")
+ self._display.warning("The required python3-logstash is not installed.")
- self.start_time = datetime.utcnow()
+ self.start_time = now()
def _init_plugin(self):
if not self.disabled:
@@ -177,7 +179,7 @@ class CallbackModule(CallbackBase):
data['status'] = "OK"
data['ansible_playbook'] = playbook._file_name
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"START PLAYBOOK | %s", data['ansible_playbook'], extra=data
)
@@ -185,7 +187,7 @@ class CallbackModule(CallbackBase):
self.logger.info("ansible start", extra=data)
def v2_playbook_on_stats(self, stats):
- end_time = datetime.utcnow()
+ end_time = now()
runtime = end_time - self.start_time
summarize_stat = {}
for host in stats.processed.keys():
@@ -202,7 +204,7 @@ class CallbackModule(CallbackBase):
data['ansible_playbook_duration'] = runtime.total_seconds()
data['ansible_result'] = json.dumps(summarize_stat) # deprecated field
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data
)
@@ -221,7 +223,7 @@ class CallbackModule(CallbackBase):
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("START PLAY | %s", self.play_name, extra=data)
else:
self.logger.info("ansible play", extra=data)
@@ -246,7 +248,7 @@ class CallbackModule(CallbackBase):
data['ansible_task'] = task_name
data['ansible_facts'] = self._dump_results(result._result)
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"SETUP FACTS | %s", self._dump_results(result._result), extra=data
)
@@ -267,7 +269,7 @@ class CallbackModule(CallbackBase):
data['ansible_task_id'] = self.task_id
data['ansible_result'] = self._dump_results(result._result)
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"TASK OK | %s | RESULT | %s",
task_name, self._dump_results(result._result), extra=data
@@ -288,7 +290,7 @@ class CallbackModule(CallbackBase):
data['ansible_task_id'] = self.task_id
data['ansible_result'] = self._dump_results(result._result)
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("TASK SKIPPED | %s", task_name, extra=data)
else:
self.logger.info("ansible skipped", extra=data)
@@ -302,7 +304,7 @@ class CallbackModule(CallbackBase):
data['ansible_play_name'] = self.play_name
data['imported_file'] = imported_file
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("IMPORT | %s", imported_file, extra=data)
else:
self.logger.info("ansible import", extra=data)
@@ -316,7 +318,7 @@ class CallbackModule(CallbackBase):
data['ansible_play_name'] = self.play_name
data['imported_file'] = missing_file
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("NOT IMPORTED | %s", missing_file, extra=data)
else:
self.logger.info("ansible import", extra=data)
@@ -340,7 +342,7 @@ class CallbackModule(CallbackBase):
data['ansible_result'] = self._dump_results(result._result)
self.errors += 1
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.error(
"TASK FAILED | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
@@ -363,7 +365,7 @@ class CallbackModule(CallbackBase):
data['ansible_result'] = self._dump_results(result._result)
self.errors += 1
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.error(
"UNREACHABLE | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
@@ -386,7 +388,7 @@ class CallbackModule(CallbackBase):
data['ansible_result'] = self._dump_results(result._result)
self.errors += 1
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.error(
"ASYNC FAILED | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py
index d20600e710..7afb08e3f0 100644
--- a/plugins/callback/mail.py
+++ b/plugins/callback/mail.py
@@ -1,76 +1,84 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2012, Dag Wieers
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
name: mail
type: notification
-short_description: Sends failure events via email
+short_description: Sends failure events through email
description:
-- This callback will report failures via email.
+ - This callback reports failures through email.
author:
-- Dag Wieers (@dagwieers)
+ - Dag Wieers (@dagwieers)
requirements:
-- whitelisting in configuration
+ - whitelisting in configuration
options:
mta:
description:
- - Mail Transfer Agent, server that accepts SMTP.
+ - Mail Transfer Agent, server that accepts SMTP.
type: str
env:
- - name: SMTPHOST
+ - name: SMTPHOST
ini:
- - section: callback_mail
- key: smtphost
+ - section: callback_mail
+ key: smtphost
default: localhost
mtaport:
description:
- - Mail Transfer Agent Port.
- - Port at which server SMTP.
+ - Mail Transfer Agent Port.
+      - Port at which the SMTP server listens.
type: int
ini:
- - section: callback_mail
- key: smtpport
+ - section: callback_mail
+ key: smtpport
default: 25
to:
description:
- - Mail recipient.
+ - Mail recipient.
type: list
elements: str
ini:
- - section: callback_mail
- key: to
+ - section: callback_mail
+ key: to
default: [root]
sender:
description:
- - Mail sender.
- - Note that this will be required from community.general 6.0.0 on.
+ - Mail sender.
+ - This is required since community.general 6.0.0.
type: str
+ required: true
ini:
- - section: callback_mail
- key: sender
+ - section: callback_mail
+ key: sender
cc:
description:
- - CC'd recipients.
+ - CC'd recipients.
type: list
elements: str
ini:
- - section: callback_mail
- key: cc
+ - section: callback_mail
+ key: cc
bcc:
description:
- - BCC'd recipients.
+ - BCC'd recipients.
type: list
elements: str
ini:
- - section: callback_mail
- key: bcc
-'''
+ - section: callback_mail
+ key: bcc
+ message_id_domain:
+ description:
+      - The domain name to use for the L(Message-ID header,https://en.wikipedia.org/wiki/Message-ID).
+ - The default is the hostname of the control node.
+ type: str
+ ini:
+ - section: callback_mail
+ key: message_id_domain
+ version_added: 8.2.0
+"""
import json
import os
@@ -78,7 +86,6 @@ import re
import email.utils
import smtplib
-from ansible.module_utils.six import string_types
from ansible.module_utils.common.text.converters import to_bytes
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
@@ -105,10 +112,6 @@ class CallbackModule(CallbackBase):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.sender = self.get_option('sender')
- if self.sender is None:
- self._display.deprecated(
- 'The sender for the mail callback has not been specified. This will be an error in the future',
- version='6.0.0', collection_name='community.general')
self.to = self.get_option('to')
self.smtphost = self.get_option('mta')
self.smtpport = self.get_option('mtaport')
@@ -129,14 +132,14 @@ class CallbackModule(CallbackBase):
if self.bcc:
bcc_addresses = email.utils.getaddresses(self.bcc)
- content = 'Date: %s\n' % email.utils.formatdate()
- content += 'From: %s\n' % email.utils.formataddr(sender_address)
+ content = f'Date: {email.utils.formatdate()}\n'
+ content += f'From: {email.utils.formataddr(sender_address)}\n'
if self.to:
- content += 'To: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in to_addresses])
+ content += f"To: {', '.join([email.utils.formataddr(pair) for pair in to_addresses])}\n"
if self.cc:
- content += 'Cc: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in cc_addresses])
- content += 'Message-ID: %s\n' % email.utils.make_msgid()
- content += 'Subject: %s\n\n' % subject.strip()
+ content += f"Cc: {', '.join([email.utils.formataddr(pair) for pair in cc_addresses])}\n"
+ content += f"Message-ID: {email.utils.make_msgid(domain=self.get_option('message_id_domain'))}\n"
+ content += f'Subject: {subject.strip()}\n\n'
content += body
addresses = to_addresses
@@ -153,23 +156,22 @@ class CallbackModule(CallbackBase):
smtp.quit()
def subject_msg(self, multiline, failtype, linenr):
- return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr])
+ msg = multiline.strip('\r\n').splitlines()[linenr]
+ return f'{failtype}: {msg}'
def indent(self, multiline, indent=8):
return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)
def body_blob(self, multiline, texttype):
''' Turn some text output in a well-indented block for sending in a mail body '''
- intro = 'with the following %s:\n\n' % texttype
- blob = ''
- for line in multiline.strip('\r\n').splitlines():
- blob += '%s\n' % line
- return intro + self.indent(blob) + '\n'
+ intro = f'with the following {texttype}:\n\n'
+ blob = "\n".join(multiline.strip('\r\n').splitlines())
+ return f"{intro}{self.indent(blob)}\n"
def mail_result(self, result, failtype):
host = result._host.get_name()
if not self.sender:
- self.sender = '"Ansible: %s" ' % host
+ self.sender = f'"Ansible: {host}" '
# Add subject
if self.itembody:
@@ -185,31 +187,33 @@ class CallbackModule(CallbackBase):
elif result._result.get('exception'): # Unrelated exceptions are added to output :-/
subject = self.subject_msg(result._result['exception'], failtype, -1)
else:
- subject = '%s: %s' % (failtype, result._task.name or result._task.action)
+ subject = f'{failtype}: {result._task.name or result._task.action}'
# Make playbook name visible (e.g. in Outlook/Gmail condensed view)
- body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name)
+ body = f'Playbook: {os.path.basename(self.playbook._file_name)}\n'
if result._task.name:
- body += 'Task: %s\n' % result._task.name
- body += 'Module: %s\n' % result._task.action
- body += 'Host: %s\n' % host
+ body += f'Task: {result._task.name}\n'
+ body += f'Module: {result._task.action}\n'
+ body += f'Host: {host}\n'
body += '\n'
# Add task information (as much as possible)
body += 'The following task failed:\n\n'
if 'invocation' in result._result:
- body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))
+ body += self.indent(f"{result._task.action}: {json.dumps(result._result['invocation']['module_args'], indent=4)}\n")
elif result._task.name:
- body += self.indent('%s (%s)\n' % (result._task.name, result._task.action))
+ body += self.indent(f'{result._task.name} ({result._task.action})\n')
else:
- body += self.indent('%s\n' % result._task.action)
+ body += self.indent(f'{result._task.action}\n')
body += '\n'
# Add item / message
if self.itembody:
body += self.itembody
elif result._result.get('failed_when_result') is True:
- body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n'
+ fail_cond_list = '\n- '.join(result._task.failed_when)
+ fail_cond = self.indent(f"failed_when:\n- {fail_cond_list}")
+ body += f"due to the following condition:\n\n{fail_cond}\n\n"
elif result._result.get('msg'):
body += self.body_blob(result._result['msg'], 'message')
@@ -222,13 +226,13 @@ class CallbackModule(CallbackBase):
body += self.body_blob(result._result['exception'], 'exception')
if result._result.get('warnings'):
for i in range(len(result._result.get('warnings'))):
- body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1))
+ body += self.body_blob(result._result['warnings'][i], f'exception {i + 1}')
if result._result.get('deprecations'):
for i in range(len(result._result.get('deprecations'))):
- body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1))
+ body += self.body_blob(result._result['deprecations'][i], f'exception {i + 1}')
body += 'and a complete dump of the error:\n\n'
- body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))
+ body += self.indent(f'{failtype}: {json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)}')
self.mail(subject=subject, body=body)
@@ -251,4 +255,4 @@ class CallbackModule(CallbackBase):
def v2_runner_item_on_failed(self, result):
# Pass item information to task failure
self.itemsubject = result._result['msg']
- self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result)
+ self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), f"failed item dump '{result._result['item']}'")
diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py
index 8295bf9759..6f1b5e2f5b 100644
--- a/plugins/callback/nrdp.py
+++ b/plugins/callback/nrdp.py
@@ -1,76 +1,72 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018 Remi Verchere
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: nrdp
- type: notification
- author: "Remi VERCHERE (@rverchere)"
- short_description: Post task results to a Nagios server through nrdp
- description:
- - This callback send playbook result to Nagios.
- - Nagios shall use NRDP to recive passive events.
- - The passive check is sent to a dedicated host/service for Ansible.
- options:
- url:
- description: URL of the nrdp server.
- required: true
- env:
- - name : NRDP_URL
- ini:
- - section: callback_nrdp
- key: url
- type: string
- validate_certs:
- description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs.)
- env:
- - name: NRDP_VALIDATE_CERTS
- ini:
- - section: callback_nrdp
- key: validate_nrdp_certs
- - section: callback_nrdp
- key: validate_certs
- type: boolean
- default: false
- aliases: [ validate_nrdp_certs ]
- token:
- description: Token to be allowed to push nrdp events.
- required: true
- env:
- - name: NRDP_TOKEN
- ini:
- - section: callback_nrdp
- key: token
- type: string
- hostname:
- description: Hostname where the passive check is linked to.
- required: true
- env:
- - name : NRDP_HOSTNAME
- ini:
- - section: callback_nrdp
- key: hostname
- type: string
- servicename:
- description: Service where the passive check is linked to.
- required: true
- env:
- - name : NRDP_SERVICENAME
- ini:
- - section: callback_nrdp
- key: servicename
- type: string
-'''
+DOCUMENTATION = r"""
+name: nrdp
+type: notification
+author: "Remi VERCHERE (@rverchere)"
+short_description: Post task results to a Nagios server through nrdp
+description:
+  - This callback sends the playbook result to Nagios.
+ - Nagios shall use NRDP to receive passive events.
+ - The passive check is sent to a dedicated host/service for Ansible.
+options:
+ url:
+ description: URL of the nrdp server.
+ required: true
+ env:
+ - name: NRDP_URL
+ ini:
+ - section: callback_nrdp
+ key: url
+ type: string
+ validate_certs:
+ description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs).
+ env:
+ - name: NRDP_VALIDATE_CERTS
+ ini:
+ - section: callback_nrdp
+ key: validate_nrdp_certs
+ - section: callback_nrdp
+ key: validate_certs
+ type: boolean
+ default: false
+ aliases: [validate_nrdp_certs]
+ token:
+ description: Token to be allowed to push nrdp events.
+ required: true
+ env:
+ - name: NRDP_TOKEN
+ ini:
+ - section: callback_nrdp
+ key: token
+ type: string
+ hostname:
+ description: Hostname where the passive check is linked to.
+ required: true
+ env:
+ - name: NRDP_HOSTNAME
+ ini:
+ - section: callback_nrdp
+ key: hostname
+ type: string
+ servicename:
+ description: Service where the passive check is linked to.
+ required: true
+ env:
+ - name: NRDP_SERVICENAME
+ ini:
+ - section: callback_nrdp
+ key: servicename
+ type: string
+"""
-import os
-import json
+from urllib.parse import urlencode
-from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.common.text.converters import to_bytes
from ansible.module_utils.urls import open_url
from ansible.plugins.callback import CallbackBase
@@ -135,10 +131,10 @@ class CallbackModule(CallbackBase):
xmldata = "\n"
xmldata += "\n"
xmldata += "\n"
- xmldata += "%s\n" % self.hostname
- xmldata += "%s\n" % self.servicename
- xmldata += "%d\n" % state
- xmldata += "\n" % msg
+ xmldata += f"{self.hostname}\n"
+ xmldata += f"{self.servicename}\n"
+ xmldata += f"{state}\n"
+ xmldata += f"\n"
xmldata += "\n"
xmldata += "\n"
@@ -155,7 +151,7 @@ class CallbackModule(CallbackBase):
validate_certs=self.validate_nrdp_certs)
return response.read()
except Exception as ex:
- self._display.warning("NRDP callback cannot send result {0}".format(ex))
+ self._display.warning(f"NRDP callback cannot send result {ex}")
def v2_playbook_on_play_start(self, play):
'''
@@ -173,17 +169,16 @@ class CallbackModule(CallbackBase):
critical = warning = 0
for host in hosts:
stat = stats.summarize(host)
- gstats += "'%s_ok'=%d '%s_changed'=%d \
- '%s_unreachable'=%d '%s_failed'=%d " % \
- (host, stat['ok'], host, stat['changed'],
- host, stat['unreachable'], host, stat['failures'])
+ gstats += (
+ f"'{host}_ok'={stat['ok']} '{host}_changed'={stat['changed']} '{host}_unreachable'={stat['unreachable']} '{host}_failed'={stat['failures']} "
+ )
# Critical when failed tasks or unreachable host
critical += stat['failures']
critical += stat['unreachable']
# Warning when changed tasks
warning += stat['changed']
- msg = "%s | %s" % (name, gstats)
+ msg = f"{name} | {gstats}"
if critical:
# Send Critical
self._send_nrdp(self.CRITICAL, msg)
diff --git a/plugins/callback/null.py b/plugins/callback/null.py
index 01f5f6ca06..3074a698d0 100644
--- a/plugins/callback/null.py
+++ b/plugins/callback/null.py
@@ -1,22 +1,20 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: 'null'
- type: stdout
- requirements:
- - set as main display callback
- short_description: Don't display stuff to screen
- description:
- - This callback prevents outputing events to screen
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: 'null'
+type: stdout
+requirements:
+ - set as main display callback
+short_description: Do not display stuff to screen
+description:
+ - This callback prevents outputting events to screen.
+"""
from ansible.plugins.callback import CallbackBase
@@ -24,7 +22,7 @@ from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
'''
- This callback wont print messages to stdout when new callback events are received.
+ This callback won't print messages to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py
index d9faa4d729..ca6ec2b916 100644
--- a/plugins/callback/opentelemetry.py
+++ b/plugins/callback/opentelemetry.py
@@ -1,76 +1,123 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Victor Martinez
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Victor Martinez (@v1v)
- name: opentelemetry
- type: notification
- short_description: Create distributed traces with OpenTelemetry
- version_added: 3.7.0
+DOCUMENTATION = r"""
+author: Victor Martinez (@v1v)
+name: opentelemetry
+type: notification
+short_description: Create distributed traces with OpenTelemetry
+version_added: 3.7.0
+description:
+ - This callback creates distributed traces for each Ansible task with OpenTelemetry.
+ - You can configure the OpenTelemetry exporter and SDK with environment variables.
+ - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html).
+ - See
+ U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables).
+options:
+ hide_task_arguments:
+ default: false
+ type: bool
description:
- - This callback creates distributed traces for each Ansible task with OpenTelemetry.
- - You can configure the OpenTelemetry exporter and SDK with environment variables.
- - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html).
- - See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables).
- options:
- hide_task_arguments:
- default: false
- type: bool
- description:
- - Hide the arguments for a task.
- env:
- - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
- ini:
- - section: callback_opentelemetry
- key: hide_task_arguments
- version_added: 5.3.0
- enable_from_environment:
- type: str
- description:
- - Whether to enable this callback only if the given environment variable exists and it is set to C(true).
- - This is handy when you use Configuration as Code and want to send distributed traces
- if running in the CI rather when running Ansible locally.
- - For such, it evaluates the given I(enable_from_environment) value as environment variable
- and if set to true this plugin will be enabled.
- env:
- - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
- ini:
- - section: callback_opentelemetry
- key: enable_from_environment
- version_added: 5.3.0
- version_added: 3.8.0
- otel_service_name:
- default: ansible
- type: str
- description:
- - The service name resource attribute.
- env:
- - name: OTEL_SERVICE_NAME
- ini:
- - section: callback_opentelemetry
- key: otel_service_name
- version_added: 5.3.0
- traceparent:
- default: None
- type: str
- description:
- - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
- env:
- - name: TRACEPARENT
- requirements:
- - opentelemetry-api (Python library)
- - opentelemetry-exporter-otlp (Python library)
- - opentelemetry-sdk (Python library)
-'''
+ - Hide the arguments for a task.
+ env:
+ - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
+ ini:
+ - section: callback_opentelemetry
+ key: hide_task_arguments
+ version_added: 5.3.0
+ enable_from_environment:
+ type: str
+ description:
+ - Whether to enable this callback only if the given environment variable exists and it is set to V(true).
+      - This is handy when you use Configuration as Code and want to send distributed traces if running in the CI rather than when
+ running Ansible locally.
+ - For such, it evaluates the given O(enable_from_environment) value as environment variable and if set to V(true) this
+ plugin is enabled.
+ env:
+ - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
+ ini:
+ - section: callback_opentelemetry
+ key: enable_from_environment
+ version_added: 5.3.0
+ version_added: 3.8.0
+ otel_service_name:
+ default: ansible
+ type: str
+ description:
+ - The service name resource attribute.
+ env:
+ - name: OTEL_SERVICE_NAME
+ ini:
+ - section: callback_opentelemetry
+ key: otel_service_name
+ version_added: 5.3.0
+ traceparent:
+ default: None
+ type: str
+ description:
+ - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
+ env:
+ - name: TRACEPARENT
+ disable_logs:
+ default: false
+ type: bool
+ description:
+ - Disable sending logs.
+ env:
+ - name: ANSIBLE_OPENTELEMETRY_DISABLE_LOGS
+ ini:
+ - section: callback_opentelemetry
+ key: disable_logs
+ version_added: 5.8.0
+ disable_attributes_in_logs:
+ default: false
+ type: bool
+ description:
+ - Disable populating span attributes to the logs.
+ env:
+ - name: ANSIBLE_OPENTELEMETRY_DISABLE_ATTRIBUTES_IN_LOGS
+ ini:
+ - section: callback_opentelemetry
+ key: disable_attributes_in_logs
+ version_added: 7.1.0
+ store_spans_in_file:
+ type: str
+ description:
+ - It stores the exported spans in the given file.
+ env:
+ - name: ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE
+ ini:
+ - section: callback_opentelemetry
+ key: store_spans_in_file
+ version_added: 9.0.0
+ otel_exporter_otlp_traces_protocol:
+ type: str
+ description:
+ - E(OTEL_EXPORTER_OTLP_TRACES_PROTOCOL) represents the transport protocol for spans.
+ - See
+ U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#envvar-OTEL_EXPORTER_OTLP_TRACES_PROTOCOL).
+ default: grpc
+ choices:
+ - grpc
+ - http/protobuf
+ env:
+ - name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL
+ ini:
+ - section: callback_opentelemetry
+ key: otel_exporter_otlp_traces_protocol
+ version_added: 9.0.0
+requirements:
+ - opentelemetry-api (Python library)
+ - opentelemetry-exporter-otlp (Python library)
+ - opentelemetry-sdk (Python library)
+"""
-EXAMPLES = '''
-examples: |
+EXAMPLES = r"""
+examples: |-
Enable the plugin in ansible.cfg:
[defaults]
callbacks_enabled = community.general.opentelemetry
@@ -82,35 +129,38 @@ examples: |
export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token"
export OTEL_SERVICE_NAME=your_service_name
export ANSIBLE_OPENTELEMETRY_ENABLED=true
-'''
+"""
import getpass
+import json
import os
import socket
-import sys
-import time
import uuid
-
from collections import OrderedDict
from os.path import basename
+from time import time_ns
+from urllib.parse import urlparse
from ansible.errors import AnsibleError
-from ansible.module_utils.six import raise_from
-from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.plugins.callback import CallbackBase
try:
from opentelemetry import trace
from opentelemetry.trace import SpanKind
- from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+ from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter as GRPCOTLPSpanExporter
+ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter as HTTPOTLPSpanExporter
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.trace.status import Status, StatusCode
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
- BatchSpanProcessor
+ BatchSpanProcessor,
+ SimpleSpanProcessor
+ )
+ from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
+ InMemorySpanExporter
)
- from opentelemetry.util._time import _time_ns
except ImportError as imp_exc:
OTEL_LIBRARY_IMPORT_ERROR = imp_exc
else:
@@ -128,18 +178,16 @@ class TaskData:
self.path = path
self.play = play
self.host_data = OrderedDict()
- if sys.version_info >= (3, 7):
- self.start = time.time_ns()
- else:
- self.start = _time_ns()
+ self.start = time_ns()
self.action = action
self.args = args
+ self.dump = None
def add_host(self, host):
if host.uuid in self.host_data:
if host.status == 'included':
# concatenate task include output from multiple items
- host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
+ host.result = f'{self.host_data[host.uuid].result}\n{host.result}'
else:
return
@@ -156,16 +204,12 @@ class HostData:
self.name = name
self.status = status
self.result = result
- if sys.version_info >= (3, 7):
- self.finish = time.time_ns()
- else:
- self.finish = _time_ns()
+ self.finish = time_ns()
class OpenTelemetrySource(object):
def __init__(self, display):
self.ansible_playbook = ""
- self.ansible_version = None
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
try:
@@ -199,7 +243,7 @@ class OpenTelemetrySource(object):
tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
- def finish_task(self, tasks_data, status, result):
+ def finish_task(self, tasks_data, status, result, dump):
""" record the results of a task for a single host """
task_uuid = result._task._uuid
@@ -213,12 +257,19 @@ class OpenTelemetrySource(object):
task = tasks_data[task_uuid]
- if self.ansible_version is None and hasattr(result, '_task_fields') and result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = result._task_fields['args'].get('_ansible_version')
-
+ task.dump = dump
task.add_host(HostData(host_uuid, host_name, status, result))
- def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent):
+ def generate_distributed_traces(self,
+ otel_service_name,
+ ansible_playbook,
+ tasks_data,
+ status,
+ traceparent,
+ disable_logs,
+ disable_attributes_in_logs,
+ otel_exporter_otlp_traces_protocol,
+ store_spans_in_file):
""" generate distributed traces from the collected TaskData and HostData """
tasks = []
@@ -234,7 +285,16 @@ class OpenTelemetrySource(object):
)
)
- processor = BatchSpanProcessor(OTLPSpanExporter())
+ otel_exporter = None
+ if store_spans_in_file:
+ otel_exporter = InMemorySpanExporter()
+ processor = SimpleSpanProcessor(otel_exporter)
+ else:
+ if otel_exporter_otlp_traces_protocol == 'grpc':
+ otel_exporter = GRPCOTLPSpanExporter()
+ else:
+ otel_exporter = HTTPOTLPSpanExporter()
+ processor = BatchSpanProcessor(otel_exporter)
trace.get_tracer_provider().add_span_processor(processor)
@@ -244,8 +304,7 @@ class OpenTelemetrySource(object):
start_time=parent_start_time, kind=SpanKind.SERVER) as parent:
parent.set_status(status)
# Populate trace metadata attributes
- if self.ansible_version is not None:
- parent.set_attribute("ansible.version", self.ansible_version)
+ parent.set_attribute("ansible.version", ansible_version)
parent.set_attribute("ansible.session", self.session)
parent.set_attribute("ansible.host.name", self.host)
if self.ip_address is not None:
@@ -254,12 +313,14 @@ class OpenTelemetrySource(object):
for task in tasks:
for host_uuid, host_data in task.host_data.items():
with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span:
- self.update_span_data(task, host_data, span)
+ self.update_span_data(task, host_data, span, disable_logs, disable_attributes_in_logs)
- def update_span_data(self, task_data, host_data, span):
+ return otel_exporter
+
+ def update_span_data(self, task_data, host_data, span, disable_logs, disable_attributes_in_logs):
""" update the span with the given TaskData and HostData """
- name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
+ name = f'[{host_data.name}] {task_data.play}: {task_data.name}'
message = 'success'
res = {}
@@ -267,6 +328,7 @@ class OpenTelemetrySource(object):
status = Status(status_code=StatusCode.OK)
if host_data.status != 'included':
# Support loops
+ enriched_error_message = None
if 'results' in host_data.result._result:
if host_data.status == 'failed':
message = self.get_error_message_from_results(host_data.result._result['results'], task_data.action)
@@ -289,36 +351,48 @@ class OpenTelemetrySource(object):
status = Status(status_code=StatusCode.UNSET)
span.set_status(status)
+
+ # Create the span and log attributes
+ attributes = {
+ "ansible.task.module": task_data.action,
+ "ansible.task.message": message,
+ "ansible.task.name": name,
+ "ansible.task.result": rc,
+ "ansible.task.host.name": host_data.name,
+ "ansible.task.host.status": host_data.status
+ }
if isinstance(task_data.args, dict) and "gather_facts" not in task_data.action:
names = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.keys())
values = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.values())
- self.set_span_attribute(span, ("ansible.task.args.name"), names)
- self.set_span_attribute(span, ("ansible.task.args.value"), values)
- self.set_span_attribute(span, "ansible.task.module", task_data.action)
- self.set_span_attribute(span, "ansible.task.message", message)
- self.set_span_attribute(span, "ansible.task.name", name)
- self.set_span_attribute(span, "ansible.task.result", rc)
- self.set_span_attribute(span, "ansible.task.host.name", host_data.name)
- self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
+ attributes[("ansible.task.args.name")] = names
+ attributes[("ansible.task.args.value")] = values
+
+ self.set_span_attributes(span, attributes)
+
# This will allow to enrich the service map
self.add_attributes_for_service_map_if_possible(span, task_data)
+ # Send logs
+ if not disable_logs:
+ # This will avoid populating span attributes to the logs
+ span.add_event(task_data.dump, attributes={} if disable_attributes_in_logs else attributes)
+ # Close span always
span.end(end_time=host_data.finish)
- def set_span_attribute(self, span, attributeName, attributeValue):
- """ update the span attribute with the given attribute and value if not None """
+ def set_span_attributes(self, span, attributes):
+ """ update the span attributes with the given attributes if not None """
if span is None and self._display is not None:
self._display.warning('span object is None. Please double check if that is expected.')
else:
- if attributeValue is not None:
- span.set_attribute(attributeName, attributeValue)
+ if attributes is not None:
+ span.set_attributes(attributes)
def add_attributes_for_service_map_if_possible(self, span, task_data):
"""Update the span attributes with the service that the task interacted with, if possible."""
redacted_url = self.parse_and_redact_url_if_possible(task_data.args)
if redacted_url:
- self.set_span_attribute(span, "http.url", redacted_url.geturl())
+ span.set_attribute("http.url", redacted_url.geturl())
@staticmethod
def parse_and_redact_url_if_possible(args):
@@ -369,7 +443,7 @@ class OpenTelemetrySource(object):
def get_error_message_from_results(results, action):
for result in results:
if result.get('failed', False):
- return ('{0}({1}) - {2}').format(action, result.get('item', 'none'), OpenTelemetrySource.get_error_message(result))
+ return f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.get_error_message(result)}"
@staticmethod
def _last_line(text):
@@ -381,14 +455,14 @@ class OpenTelemetrySource(object):
message = result.get('msg', 'failed')
exception = result.get('exception')
stderr = result.get('stderr')
- return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
+ return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\""
@staticmethod
def enrich_error_message_from_results(results, action):
message = ""
for result in results:
if result.get('failed', False):
- message = ('{0}({1}) - {2}\n{3}').format(action, result.get('item', 'none'), OpenTelemetrySource.enrich_error_message(result), message)
+ message = f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.enrich_error_message(result)}\n{message}"
return message
@@ -405,6 +479,8 @@ class CallbackModule(CallbackBase):
def __init__(self, display=None):
super(CallbackModule, self).__init__(display=display)
self.hide_task_arguments = None
+ self.disable_attributes_in_logs = None
+ self.disable_logs = None
self.otel_service_name = None
self.ansible_playbook = None
self.play_name = None
@@ -412,11 +488,13 @@ class CallbackModule(CallbackBase):
self.errors = 0
self.disabled = False
self.traceparent = False
+ self.store_spans_in_file = False
+ self.otel_exporter_otlp_traces_protocol = None
if OTEL_LIBRARY_IMPORT_ERROR:
- raise_from(
- AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'),
- OTEL_LIBRARY_IMPORT_ERROR)
+ raise AnsibleError(
+ 'The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'
+ ) from OTEL_LIBRARY_IMPORT_ERROR
self.tasks_data = OrderedDict()
@@ -430,11 +508,18 @@ class CallbackModule(CallbackBase):
environment_variable = self.get_option('enable_from_environment')
if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true':
self.disabled = True
- self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. "
- "Disabling the `opentelemetry` callback plugin.".format(environment_variable))
+ self._display.warning(
+ f"The `enable_from_environment` option has been set and {environment_variable} is not enabled. Disabling the `opentelemetry` callback plugin."
+ )
self.hide_task_arguments = self.get_option('hide_task_arguments')
+ self.disable_attributes_in_logs = self.get_option('disable_attributes_in_logs')
+
+ self.disable_logs = self.get_option('disable_logs')
+
+ self.store_spans_in_file = self.get_option('store_spans_in_file')
+
self.otel_service_name = self.get_option('otel_service_name')
if not self.otel_service_name:
@@ -443,6 +528,22 @@ class CallbackModule(CallbackBase):
# See https://github.com/open-telemetry/opentelemetry-specification/issues/740
self.traceparent = self.get_option('traceparent')
+ self.otel_exporter_otlp_traces_protocol = self.get_option('otel_exporter_otlp_traces_protocol')
+
+ def dump_results(self, task, result):
+ """ dump the results if disable_logs is not enabled """
+ if self.disable_logs:
+ return ""
+ # ansible.builtin.uri contains the response in the json field
+ save = dict(result._result)
+
+ if "json" in save and task.action in ("ansible.builtin.uri", "ansible.legacy.uri", "uri"):
+ save.pop("json")
+ # ansible.builtin.slurp contains the response in the content field
+ if "content" in save and task.action in ("ansible.builtin.slurp", "ansible.legacy.slurp", "slurp"):
+ save.pop("content")
+ return self._dump_results(save)
+
def v2_playbook_on_start(self, playbook):
self.ansible_playbook = basename(playbook._file_name)
@@ -491,28 +592,32 @@ class CallbackModule(CallbackBase):
self.opentelemetry.finish_task(
self.tasks_data,
status,
- result
+ result,
+ self.dump_results(self.tasks_data[result._task._uuid], result)
)
def v2_runner_on_ok(self, result):
self.opentelemetry.finish_task(
self.tasks_data,
'ok',
- result
+ result,
+ self.dump_results(self.tasks_data[result._task._uuid], result)
)
def v2_runner_on_skipped(self, result):
self.opentelemetry.finish_task(
self.tasks_data,
'skipped',
- result
+ result,
+ self.dump_results(self.tasks_data[result._task._uuid], result)
)
def v2_playbook_on_include(self, included_file):
self.opentelemetry.finish_task(
self.tasks_data,
'included',
- included_file
+ included_file,
+ ""
)
def v2_playbook_on_stats(self, stats):
@@ -520,13 +625,22 @@ class CallbackModule(CallbackBase):
status = Status(status_code=StatusCode.OK)
else:
status = Status(status_code=StatusCode.ERROR)
- self.opentelemetry.generate_distributed_traces(
+ otel_exporter = self.opentelemetry.generate_distributed_traces(
self.otel_service_name,
self.ansible_playbook,
self.tasks_data,
status,
- self.traceparent
+ self.traceparent,
+ self.disable_logs,
+ self.disable_attributes_in_logs,
+ self.otel_exporter_otlp_traces_protocol,
+ self.store_spans_in_file
)
+ if self.store_spans_in_file:
+ spans = [json.loads(span.to_json()) for span in otel_exporter.get_finished_spans()]
+ with open(self.store_spans_in_file, "w", encoding="utf-8") as output:
+ json.dump({"spans": spans}, output, indent=4)
+
def v2_runner_on_async_failed(self, result, **kwargs):
self.errors += 1
diff --git a/plugins/callback/print_task.py b/plugins/callback/print_task.py
new file mode 100644
index 0000000000..f6008c817f
--- /dev/null
+++ b/plugins/callback/print_task.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2025, Max Mitschke
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: print_task
+type: aggregate
+short_description: Prints playbook task snippet to job output
+description:
+ - This plugin prints the currently executing playbook task to the job output.
+version_added: 10.7.0
+requirements:
+ - enable in configuration
+"""
+
+EXAMPLES = r"""
+ansible.cfg: |-
+ # Enable plugin
+ [defaults]
+ callbacks_enabled=community.general.print_task
+"""
+
+from yaml import load, dump
+
+try:
+ from yaml import CSafeDumper as SafeDumper
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeDumper, SafeLoader
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ This callback module tells you how long your plays ran for.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.print_task'
+
+ CALLBACK_NEEDS_ENABLED = True
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+ self._printed_message = False
+
+ def _print_task(self, task):
+ if hasattr(task, '_ds'):
+ task_snippet = load(str([task._ds.copy()]), Loader=SafeLoader)
+ task_yaml = dump(task_snippet, sort_keys=False, Dumper=SafeDumper)
+ self._display.display(f"\n{task_yaml}\n")
+ self._printed_message = True
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._printed_message = False
+
+ def v2_runner_on_start(self, host, task):
+ if not self._printed_message:
+ self._print_task(task)
diff --git a/plugins/callback/say.py b/plugins/callback/say.py
index 03d7060352..0455ee69e6 100644
--- a/plugins/callback/say.py
+++ b/plugins/callback/say.py
@@ -1,26 +1,22 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2012, Michael DeHaan,
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: say
- type: notification
- requirements:
- - whitelisting in configuration
- - the '/usr/bin/say' command line program (standard on macOS) or 'espeak' command line program
- short_description: notify using software speech synthesizer
- description:
- - This plugin will use the 'say' or 'espeak' program to "speak" about play events.
- notes:
- - In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: say
+type: notification
+requirements:
+ - whitelisting in configuration
+ - the C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program
+short_description: Notify using software speech synthesizer
+description:
+ - This plugin uses C(say) or C(espeak) to "speak" about play events.
+"""
import platform
import subprocess
@@ -52,7 +48,7 @@ class CallbackModule(CallbackBase):
self.synthesizer = get_bin_path('say')
if platform.system() != 'Darwin':
# 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter
- self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
+ self._display.warning(f"'say' executable found but system is '{platform.system()}': ignoring voice parameter")
else:
self.FAILED_VOICE = 'Zarvox'
self.REGULAR_VOICE = 'Trinoids'
@@ -71,7 +67,7 @@ class CallbackModule(CallbackBase):
# ansible will not call any callback if disabled is set to True
if not self.synthesizer:
self.disabled = True
- self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__))
+ self._display.warning(f"Unable to find either 'say' or 'espeak' executable, plugin {os.path.basename(__file__)} disabled")
def say(self, msg, voice):
cmd = [self.synthesizer, msg]
@@ -80,7 +76,7 @@ class CallbackModule(CallbackBase):
subprocess.call(cmd)
def runner_on_failed(self, host, res, ignore_errors=False):
- self.say("Failure on host %s" % host, self.FAILED_VOICE)
+ self.say(f"Failure on host {host}", self.FAILED_VOICE)
def runner_on_ok(self, host, res):
self.say("pew", self.LASER_VOICE)
@@ -89,13 +85,13 @@ class CallbackModule(CallbackBase):
self.say("pew", self.LASER_VOICE)
def runner_on_unreachable(self, host, res):
- self.say("Failure on host %s" % host, self.FAILED_VOICE)
+ self.say(f"Failure on host {host}", self.FAILED_VOICE)
def runner_on_async_ok(self, host, res, jid):
self.say("pew", self.LASER_VOICE)
def runner_on_async_failed(self, host, res, jid):
- self.say("Failure on host %s" % host, self.FAILED_VOICE)
+ self.say(f"Failure on host {host}", self.FAILED_VOICE)
def playbook_on_start(self):
self.say("Running Playbook", self.REGULAR_VOICE)
@@ -105,15 +101,15 @@ class CallbackModule(CallbackBase):
def playbook_on_task_start(self, name, is_conditional):
if not is_conditional:
- self.say("Starting task: %s" % name, self.REGULAR_VOICE)
+ self.say(f"Starting task: {name}", self.REGULAR_VOICE)
else:
- self.say("Notifying task: %s" % name, self.REGULAR_VOICE)
+ self.say(f"Notifying task: {name}", self.REGULAR_VOICE)
def playbook_on_setup(self):
self.say("Gathering facts", self.REGULAR_VOICE)
def playbook_on_play_start(self, name):
- self.say("Starting play: %s" % name, self.HAPPY_VOICE)
+ self.say(f"Starting play: {name}", self.HAPPY_VOICE)
def playbook_on_stats(self, stats):
self.say("Play complete", self.HAPPY_VOICE)
diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py
index 78c28ec7a5..2a7dd07a3e 100644
--- a/plugins/callback/selective.py
+++ b/plugins/callback/selective.py
@@ -1,41 +1,39 @@
-# -*- coding: utf-8 -*-
# Copyright (c) Fastly, inc 2016
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: selective
- type: stdout
- requirements:
- - set as main display callback
- short_description: only print certain tasks
- description:
- - This callback only prints tasks that have been tagged with C(print_action) or that have failed.
- This allows operators to focus on the tasks that provide value only.
- - Tasks that are not printed are placed with a C(.).
- - If you increase verbosity all tasks are printed.
- options:
- nocolor:
- default: False
- description: This setting allows suppressing colorizing output
- env:
- - name: ANSIBLE_NOCOLOR
- - name: ANSIBLE_SELECTIVE_DONT_COLORIZE
- ini:
- - section: defaults
- key: nocolor
- type: boolean
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: selective
+type: stdout
+requirements:
+ - set as main display callback
+short_description: Only print certain tasks
+description:
+ - This callback only prints tasks that have been tagged with C(print_action) or that have failed. This allows operators
+ to focus on the tasks that provide value only.
+ - Tasks that are not printed are placed with a C(.).
+ - If you increase verbosity all tasks are printed.
+options:
+ nocolor:
+ default: false
+ description: This setting allows suppressing colorizing output.
+ env:
+ - name: ANSIBLE_NOCOLOR
+ - name: ANSIBLE_SELECTIVE_DONT_COLORIZE
+ ini:
+ - section: defaults
+ key: nocolor
+ type: boolean
+"""
-EXAMPLES = """
- - ansible.builtin.debug: msg="This will not be printed"
- - ansible.builtin.debug: msg="But this will"
- tags: [print_action]
+EXAMPLES = r"""
+- ansible.builtin.debug: msg="This will not be printed"
+- ansible.builtin.debug: msg="But this will"
+ tags: [print_action]
"""
import difflib
@@ -44,26 +42,17 @@ from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from ansible.module_utils.common.text.converters import to_text
-try:
- codeCodes = C.COLOR_CODES
-except AttributeError:
- # This constant was moved to ansible.constants in
- # https://github.com/ansible/ansible/commit/1202dd000f10b0e8959019484f1c3b3f9628fc67
- # (will be included in ansible-core 2.11.0). For older Ansible/ansible-base versions,
- # we include from the original location.
- from ansible.utils.color import codeCodes
-
DONT_COLORIZE = False
COLORS = {
'normal': '\033[0m',
- 'ok': '\033[{0}m'.format(codeCodes[C.COLOR_OK]),
+ 'ok': f'\x1b[{C.COLOR_CODES[C.COLOR_OK]}m',
'bold': '\033[1m',
'not_so_bold': '\033[1m\033[34m',
- 'changed': '\033[{0}m'.format(codeCodes[C.COLOR_CHANGED]),
- 'failed': '\033[{0}m'.format(codeCodes[C.COLOR_ERROR]),
+ 'changed': f'\x1b[{C.COLOR_CODES[C.COLOR_CHANGED]}m',
+ 'failed': f'\x1b[{C.COLOR_CODES[C.COLOR_ERROR]}m',
'endc': '\033[0m',
- 'skipped': '\033[{0}m'.format(codeCodes[C.COLOR_SKIP]),
+ 'skipped': f'\x1b[{C.COLOR_CODES[C.COLOR_SKIP]}m',
}
@@ -82,7 +71,7 @@ def colorize(msg, color):
if DONT_COLORIZE:
return msg
else:
- return '{0}{1}{2}'.format(COLORS[color], msg, COLORS['endc'])
+ return f"{COLORS[color]}{msg}{COLORS['endc']}"
class CallbackModule(CallbackBase):
@@ -115,15 +104,15 @@ class CallbackModule(CallbackBase):
line_length = 120
if self.last_skipped:
print()
- msg = colorize("# {0} {1}".format(task_name,
- '*' * (line_length - len(task_name))), 'bold')
+ line = f"# {task_name} "
+ msg = colorize(f"{line}{'*' * (line_length - len(line))}", 'bold')
print(msg)
def _indent_text(self, text, indent_level):
lines = text.splitlines()
result_lines = []
for l in lines:
- result_lines.append("{0}{1}".format(' ' * indent_level, l))
+ result_lines.append(f"{' ' * indent_level}{l}")
return '\n'.join(result_lines)
def _print_diff(self, diff, indent_level):
@@ -156,19 +145,19 @@ class CallbackModule(CallbackBase):
change_string = colorize('FAILED!!!', color)
else:
color = 'changed' if changed else 'ok'
- change_string = colorize("changed={0}".format(changed), color)
+ change_string = colorize(f"changed={changed}", color)
msg = colorize(msg, color)
line_length = 120
spaces = ' ' * (40 - len(name) - indent_level)
- line = "{0} * {1}{2}- {3}".format(' ' * indent_level, name, spaces, change_string)
+ line = f"{' ' * indent_level} * {name}{spaces}- {change_string}"
if len(msg) < 50:
- line += ' -- {0}'.format(msg)
- print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
+ line += f' -- {msg}'
+ print(f"{line} {'-' * (line_length - len(line))}---------")
else:
- print("{0} {1}".format(line, '-' * (line_length - len(line))))
+ print(f"{line} {'-' * (line_length - len(line))}")
print(self._indent_text(msg, indent_level + 4))
if diff:
@@ -218,7 +207,7 @@ class CallbackModule(CallbackBase):
stderr = [r.get('exception', None), r.get('module_stderr', None)]
stderr = "\n".join([e for e in stderr if e]).strip()
- self._print_host_or_item(r['item'],
+ self._print_host_or_item(r[r['ansible_loop_var']],
r.get('changed', False),
to_text(r.get('msg', '')),
r.get('diff', None),
@@ -248,8 +237,10 @@ class CallbackModule(CallbackBase):
else:
color = 'ok'
- msg = '{0} : ok={1}\tchanged={2}\tfailed={3}\tunreachable={4}\trescued={5}\tignored={6}'.format(
- host, s['ok'], s['changed'], s['failures'], s['unreachable'], s['rescued'], s['ignored'])
+ msg = (
+ f"{host} : ok={s['ok']}\tchanged={s['changed']}\tfailed={s['failures']}\tunreachable="
+ f"{s['unreachable']}\trescued={s['rescued']}\tignored={s['ignored']}"
+ )
print(colorize(msg, color))
def v2_runner_on_skipped(self, result, **kwargs):
@@ -261,17 +252,15 @@ class CallbackModule(CallbackBase):
line_length = 120
spaces = ' ' * (31 - len(result._host.name) - 4)
- line = " * {0}{1}- {2}".format(colorize(result._host.name, 'not_so_bold'),
- spaces,
- colorize("skipped", 'skipped'),)
+ line = f" * {colorize(result._host.name, 'not_so_bold')}{spaces}- {colorize('skipped', 'skipped')}"
reason = result._result.get('skipped_reason', '') or \
result._result.get('skip_reason', '')
if len(reason) < 50:
- line += ' -- {0}'.format(reason)
- print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
+ line += f' -- {reason}'
+ print(f"{line} {'-' * (line_length - len(line))}---------")
else:
- print("{0} {1}".format(line, '-' * (line_length - len(line))))
+ print(f"{line} {'-' * (line_length - len(line))}")
print(self._indent_text(reason, 8))
print(reason)
diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py
index 46340ee44c..e1d95abe06 100644
--- a/plugins/callback/slack.py
+++ b/plugins/callback/slack.py
@@ -1,66 +1,70 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2014-2015, Matt Martz
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: slack
- type: notification
- requirements:
- - whitelist in configuration
- - prettytable (python library)
- short_description: Sends play events to a Slack channel
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: slack
+type: notification
+requirements:
+ - whitelist in configuration
+ - prettytable (python library)
+short_description: Sends play events to a Slack channel
+description:
+ - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.
+options:
+ http_agent:
description:
- - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.
- - Before 2.4 only environment variables were available for configuring this plugin
- options:
- webhook_url:
- required: True
- description: Slack Webhook URL
- env:
- - name: SLACK_WEBHOOK_URL
- ini:
- - section: callback_slack
- key: webhook_url
- channel:
- default: "#ansible"
- description: Slack room to post in.
- env:
- - name: SLACK_CHANNEL
- ini:
- - section: callback_slack
- key: channel
- username:
- description: Username to post as.
- env:
- - name: SLACK_USERNAME
- default: ansible
- ini:
- - section: callback_slack
- key: username
- validate_certs:
- description: validate the SSL certificate of the Slack server. (For HTTPS URLs)
- env:
- - name: SLACK_VALIDATE_CERTS
- ini:
- - section: callback_slack
- key: validate_certs
- default: True
- type: bool
-'''
+ - HTTP user agent to use for requests to Slack.
+ type: string
+ version_added: "10.5.0"
+ webhook_url:
+ required: true
+ description: Slack Webhook URL.
+ type: str
+ env:
+ - name: SLACK_WEBHOOK_URL
+ ini:
+ - section: callback_slack
+ key: webhook_url
+ channel:
+ default: "#ansible"
+ description: Slack room to post in.
+ type: str
+ env:
+ - name: SLACK_CHANNEL
+ ini:
+ - section: callback_slack
+ key: channel
+ username:
+ description: Username to post as.
+ type: str
+ env:
+ - name: SLACK_USERNAME
+ default: ansible
+ ini:
+ - section: callback_slack
+ key: username
+ validate_certs:
+ description: Validate the SSL certificate of the Slack server for HTTPS URLs.
+ env:
+ - name: SLACK_VALIDATE_CERTS
+ ini:
+ - section: callback_slack
+ key: validate_certs
+ default: true
+ type: bool
+"""
import json
import os
import uuid
from ansible import context
-from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.urls import open_url
from ansible.plugins.callback import CallbackBase
@@ -106,7 +110,7 @@ class CallbackModule(CallbackBase):
self.username = self.get_option('username')
self.show_invocation = (self._display.verbosity > 1)
self.validate_certs = self.get_option('validate_certs')
-
+ self.http_agent = self.get_option('http_agent')
if self.webhook_url is None:
self.disabled = True
self._display.warning('Slack Webhook URL was not provided. The '
@@ -132,18 +136,22 @@ class CallbackModule(CallbackBase):
self._display.debug(data)
self._display.debug(self.webhook_url)
try:
- response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs,
- headers=headers)
+ response = open_url(
+ self.webhook_url,
+ data=data,
+ validate_certs=self.validate_certs,
+ headers=headers,
+ http_agent=self.http_agent,
+ )
return response.read()
except Exception as e:
- self._display.warning(u'Could not submit message to Slack: %s' %
- to_text(e))
+ self._display.warning(f'Could not submit message to Slack: {e}')
def v2_playbook_on_start(self, playbook):
self.playbook_name = os.path.basename(playbook._file_name)
title = [
- '*Playbook initiated* (_%s_)' % self.guid
+ f'*Playbook initiated* (_{self.guid}_)'
]
invocation_items = []
@@ -154,23 +162,23 @@ class CallbackModule(CallbackBase):
subset = context.CLIARGS['subset']
inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']]
- invocation_items.append('Inventory: %s' % ', '.join(inventory))
+ invocation_items.append(f"Inventory: {', '.join(inventory)}")
if tags and tags != ['all']:
- invocation_items.append('Tags: %s' % ', '.join(tags))
+ invocation_items.append(f"Tags: {', '.join(tags)}")
if skip_tags:
- invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags))
+ invocation_items.append(f"Skip Tags: {', '.join(skip_tags)}")
if subset:
- invocation_items.append('Limit: %s' % subset)
+ invocation_items.append(f'Limit: {subset}')
if extra_vars:
- invocation_items.append('Extra Vars: %s' %
- ' '.join(extra_vars))
+ invocation_items.append(f"Extra Vars: {' '.join(extra_vars)}")
- title.append('by *%s*' % context.CLIARGS['remote_user'])
+ title.append(f"by *{context.CLIARGS['remote_user']}*")
- title.append('\n\n*%s*' % self.playbook_name)
+ title.append(f'\n\n*{self.playbook_name}*')
msg_items = [' '.join(title)]
if invocation_items:
- msg_items.append('```\n%s\n```' % '\n'.join(invocation_items))
+ _inv_item = '\n'.join(invocation_items)
+ msg_items.append(f'```\n{_inv_item}\n```')
msg = '\n'.join(msg_items)
@@ -190,8 +198,8 @@ class CallbackModule(CallbackBase):
def v2_playbook_on_play_start(self, play):
"""Display Play start messages"""
- name = play.name or 'Play name not specified (%s)' % play._uuid
- msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name)
+ name = play.name or f'Play name not specified ({play._uuid})'
+ msg = f'*Starting play* (_{self.guid}_)\n\n*{name}*'
attachments = [
{
'fallback': msg,
@@ -226,7 +234,7 @@ class CallbackModule(CallbackBase):
attachments = []
msg_items = [
- '*Playbook Complete* (_%s_)' % self.guid
+ f'*Playbook Complete* (_{self.guid}_)'
]
if failures or unreachable:
color = 'danger'
@@ -235,7 +243,7 @@ class CallbackModule(CallbackBase):
color = 'good'
msg_items.append('\n*Success!*')
- msg_items.append('```\n%s\n```' % t)
+ msg_items.append(f'```\n{t}\n```')
msg = '\n'.join(msg_items)
diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py
index 701cbfdebd..635a3109bc 100644
--- a/plugins/callback/splunk.py
+++ b/plugins/callback/splunk.py
@@ -1,76 +1,76 @@
-# -*- coding: utf-8 -*-
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: splunk
- type: aggregate
- short_description: Sends task result events to Splunk HTTP Event Collector
- author: "Stuart Hirst (!UNKNOWN) "
+DOCUMENTATION = r"""
+name: splunk
+type: notification
+short_description: Sends task result events to Splunk HTTP Event Collector
+author: "Stuart Hirst (!UNKNOWN) "
+description:
+ - This callback plugin sends task results as JSON formatted events to a Splunk HTTP collector.
+ - The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/).
+ - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based.
+requirements:
+ - Whitelisting this callback plugin
+ - 'Create a HTTP Event Collector in Splunk'
+ - 'Define the URL and token in C(ansible.cfg)'
+options:
+ url:
+ description: URL to the Splunk HTTP collector source.
+ type: str
+ env:
+ - name: SPLUNK_URL
+ ini:
+ - section: callback_splunk
+ key: url
+ authtoken:
+ description: Token to authenticate the connection to the Splunk HTTP collector.
+ type: str
+ env:
+ - name: SPLUNK_AUTHTOKEN
+ ini:
+ - section: callback_splunk
+ key: authtoken
+ validate_certs:
+ description: Whether to validate certificates for connections to HEC. It is not recommended to set to V(false) except
+ when you are sure that nobody can intercept the connection between this plugin and HEC, as setting it to V(false) allows
+ man-in-the-middle attacks!
+ env:
+ - name: SPLUNK_VALIDATE_CERTS
+ ini:
+ - section: callback_splunk
+ key: validate_certs
+ type: bool
+ default: true
+ version_added: '1.0.0'
+ include_milliseconds:
+ description: Whether to include milliseconds as part of the generated timestamp field in the event sent to the Splunk
+ HTTP collector.
+ env:
+ - name: SPLUNK_INCLUDE_MILLISECONDS
+ ini:
+ - section: callback_splunk
+ key: include_milliseconds
+ type: bool
+ default: false
+ version_added: 2.0.0
+ batch:
description:
- - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
- - The companion Splunk Monitoring & Diagnostics App is available here "https://splunkbase.splunk.com/app/4023/"
- - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based.
- requirements:
- - Whitelisting this callback plugin
- - 'Create a HTTP Event Collector in Splunk'
- - 'Define the url and token in ansible.cfg'
- options:
- url:
- description: URL to the Splunk HTTP collector source
- env:
- - name: SPLUNK_URL
- ini:
- - section: callback_splunk
- key: url
- authtoken:
- description: Token to authenticate the connection to the Splunk HTTP collector
- env:
- - name: SPLUNK_AUTHTOKEN
- ini:
- - section: callback_splunk
- key: authtoken
- validate_certs:
- description: Whether to validate certificates for connections to HEC. It is not recommended to set to
- C(false) except when you are sure that nobody can intercept the connection
- between this plugin and HEC, as setting it to C(false) allows man-in-the-middle attacks!
- env:
- - name: SPLUNK_VALIDATE_CERTS
- ini:
- - section: callback_splunk
- key: validate_certs
- type: bool
- default: true
- version_added: '1.0.0'
- include_milliseconds:
- description: Whether to include milliseconds as part of the generated timestamp field in the event
- sent to the Splunk HTTP collector
- env:
- - name: SPLUNK_INCLUDE_MILLISECONDS
- ini:
- - section: callback_splunk
- key: include_milliseconds
- type: bool
- default: false
- version_added: 2.0.0
- batch:
- description:
- - Correlation ID which can be set across multiple playbook executions.
- env:
- - name: SPLUNK_BATCH
- ini:
- - section: callback_splunk
- key: batch
- type: str
- version_added: 3.3.0
-'''
+ - Correlation ID which can be set across multiple playbook executions.
+ env:
+ - name: SPLUNK_BATCH
+ ini:
+ - section: callback_splunk
+ key: batch
+ type: str
+ version_added: 3.3.0
+"""
-EXAMPLES = '''
-examples: >
+EXAMPLES = r"""
+examples: >-
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = community.general.splunk
@@ -81,26 +81,29 @@ examples: >
[callback_splunk]
url = http://mysplunkinstance.datapaas.io:8088/services/collector/event
authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88
-'''
+"""
import json
import uuid
import socket
import getpass
-from datetime import datetime
from os.path import basename
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
+from ansible_collections.community.general.plugins.module_utils.datetime import (
+ now,
+)
+
class SplunkHTTPCollectorSource(object):
def __init__(self):
self.ansible_check_mode = False
self.ansible_playbook = ""
- self.ansible_version = ""
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
self.ip_address = socket.gethostbyname(socket.gethostname())
@@ -110,10 +113,6 @@ class SplunkHTTPCollectorSource(object):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
- if result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = \
- result._task_fields['args'].get('_ansible_version')
-
if result._task._role:
ansible_role = str(result._task._role)
else:
@@ -134,12 +133,12 @@ class SplunkHTTPCollectorSource(object):
else:
time_format = '%Y-%m-%d %H:%M:%S +0000'
- data['timestamp'] = datetime.utcnow().strftime(time_format)
+ data['timestamp'] = now().strftime(time_format)
data['host'] = self.host
data['ip_address'] = self.ip_address
data['user'] = self.user
data['runtime'] = runtime
- data['ansible_version'] = self.ansible_version
+ data['ansible_version'] = ansible_version
data['ansible_check_mode'] = self.ansible_check_mode
data['ansible_host'] = result._host.name
data['ansible_playbook'] = self.ansible_playbook
@@ -148,15 +147,14 @@ class SplunkHTTPCollectorSource(object):
data['ansible_result'] = result._result
# This wraps the json payload in and outer json event needed by Splunk
- jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True)
- jsondata = '{"event":' + jsondata + "}"
+ jsondata = json.dumps({"event": data}, cls=AnsibleJSONEncoder, sort_keys=True)
open_url(
url,
jsondata,
headers={
'Content-type': 'application/json',
- 'Authorization': 'Splunk ' + authtoken
+ 'Authorization': f"Splunk {authtoken}"
},
method='POST',
validate_certs=validate_certs
@@ -165,7 +163,7 @@ class SplunkHTTPCollectorSource(object):
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'aggregate'
+ CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'community.general.splunk'
CALLBACK_NEEDS_WHITELIST = True
@@ -181,7 +179,7 @@ class CallbackModule(CallbackBase):
def _runtime(self, result):
return (
- datetime.utcnow() -
+ now() -
self.start_datetimes[result._task._uuid]
).total_seconds()
@@ -220,10 +218,10 @@ class CallbackModule(CallbackBase):
self.splunk.ansible_playbook = basename(playbook._file_name)
def v2_playbook_on_task_start(self, task, is_conditional):
- self.start_datetimes[task._uuid] = datetime.utcnow()
+ self.start_datetimes[task._uuid] = now()
def v2_playbook_on_handler_task_start(self, task):
- self.start_datetimes[task._uuid] = datetime.utcnow()
+ self.start_datetimes[task._uuid] = now()
def v2_runner_on_ok(self, result, **kwargs):
self.splunk.send_event(
diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py
index 0b6c9b6fee..3f99bf216a 100644
--- a/plugins/callback/sumologic.py
+++ b/plugins/callback/sumologic.py
@@ -1,34 +1,33 @@
-# -*- coding: utf-8 -*-
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
name: sumologic
-type: aggregate
+type: notification
short_description: Sends task result events to Sumologic
author: "Ryan Currah (@ryancurrah)"
description:
- - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source
+ - This callback plugin sends task results as JSON formatted events to a Sumologic HTTP collector source.
requirements:
- Whitelisting this callback plugin
- - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of C(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator
- of C("timestamp": "(.*)")'
+ - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of V(yyyy-MM-dd HH:mm:ss ZZZZ) and
+ a custom timestamp locator of V("timestamp": "(.*\)")'
options:
url:
- description: URL to the Sumologic HTTP collector source
+ description: URL to the Sumologic HTTP collector source.
+ type: str
env:
- name: SUMOLOGIC_URL
ini:
- section: callback_sumologic
key: url
-'''
+"""
-EXAMPLES = '''
-examples: >
+EXAMPLES = r"""
+examples: |-
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = community.general.sumologic
@@ -39,26 +38,29 @@ examples: >
Set the ansible.cfg variable in the callback_sumologic block
[callback_sumologic]
url = https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp==
-'''
+"""
import json
import uuid
import socket
import getpass
-from datetime import datetime
from os.path import basename
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
+from ansible_collections.community.general.plugins.module_utils.datetime import (
+ now,
+)
+
class SumologicHTTPCollectorSource(object):
def __init__(self):
self.ansible_check_mode = False
self.ansible_playbook = ""
- self.ansible_version = ""
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
self.ip_address = socket.gethostbyname(socket.gethostname())
@@ -68,10 +70,6 @@ class SumologicHTTPCollectorSource(object):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
- if result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = \
- result._task_fields['args'].get('_ansible_version')
-
if result._task._role:
ansible_role = str(result._task._role)
else:
@@ -84,13 +82,12 @@ class SumologicHTTPCollectorSource(object):
data['uuid'] = result._task._uuid
data['session'] = self.session
data['status'] = state
- data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
- '+0000')
+ data['timestamp'] = now().strftime('%Y-%m-%d %H:%M:%S +0000')
data['host'] = self.host
data['ip_address'] = self.ip_address
data['user'] = self.user
data['runtime'] = runtime
- data['ansible_version'] = self.ansible_version
+ data['ansible_version'] = ansible_version
data['ansible_check_mode'] = self.ansible_check_mode
data['ansible_host'] = result._host.name
data['ansible_playbook'] = self.ansible_playbook
@@ -111,7 +108,7 @@ class SumologicHTTPCollectorSource(object):
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'aggregate'
+ CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'community.general.sumologic'
CALLBACK_NEEDS_WHITELIST = True
@@ -123,7 +120,7 @@ class CallbackModule(CallbackBase):
def _runtime(self, result):
return (
- datetime.utcnow() -
+ now() -
self.start_datetimes[result._task._uuid]
).total_seconds()
@@ -144,10 +141,10 @@ class CallbackModule(CallbackBase):
self.sumologic.ansible_playbook = basename(playbook._file_name)
def v2_playbook_on_task_start(self, task, is_conditional):
- self.start_datetimes[task._uuid] = datetime.utcnow()
+ self.start_datetimes[task._uuid] = now()
def v2_playbook_on_handler_task_start(self, task):
- self.start_datetimes[task._uuid] = datetime.utcnow()
+ self.start_datetimes[task._uuid] = now()
def v2_runner_on_ok(self, result, **kwargs):
self.sumologic.send_event(
diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py
index 7ca99a9edd..657ca017f6 100644
--- a/plugins/callback/syslog_json.py
+++ b/plugins/callback/syslog_json.py
@@ -1,61 +1,58 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: syslog_json
- type: notification
- requirements:
- - whitelist in configuration
- short_description: sends JSON events to syslog
- description:
- - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format
- - Before Ansible 2.9 only environment variables were available for configuration
- options:
- server:
- description: syslog server that will receive the event
- env:
- - name: SYSLOG_SERVER
- default: localhost
- ini:
- - section: callback_syslog_json
- key: syslog_server
- port:
- description: port on which the syslog server is listening
- env:
- - name: SYSLOG_PORT
- default: 514
- ini:
- - section: callback_syslog_json
- key: syslog_port
- facility:
- description: syslog facility to log as
- env:
- - name: SYSLOG_FACILITY
- default: user
- ini:
- - section: callback_syslog_json
- key: syslog_facility
- setup:
- description: Log setup tasks.
- env:
- - name: ANSIBLE_SYSLOG_SETUP
- type: bool
- default: true
- ini:
- - section: callback_syslog_json
- key: syslog_setup
- version_added: 4.5.0
-'''
-
-import os
-import json
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: syslog_json
+type: notification
+requirements:
+ - whitelist in configuration
+short_description: Sends JSON events to syslog
+description:
+ - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format.
+options:
+ server:
+ description: Syslog server that receives the event.
+ type: str
+ env:
+ - name: SYSLOG_SERVER
+ default: localhost
+ ini:
+ - section: callback_syslog_json
+ key: syslog_server
+ port:
+ description: Port on which the syslog server is listening.
+ type: int
+ env:
+ - name: SYSLOG_PORT
+ default: 514
+ ini:
+ - section: callback_syslog_json
+ key: syslog_port
+ facility:
+ description: Syslog facility to log as.
+ type: str
+ env:
+ - name: SYSLOG_FACILITY
+ default: user
+ ini:
+ - section: callback_syslog_json
+ key: syslog_facility
+ setup:
+ description: Log setup tasks.
+ env:
+ - name: ANSIBLE_SYSLOG_SETUP
+ type: bool
+ default: true
+ ini:
+ - section: callback_syslog_json
+ key: syslog_setup
+ version_added: 4.5.0
+"""
import logging
import logging.handlers
@@ -71,7 +68,7 @@ class CallbackModule(CallbackBase):
"""
CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'aggregate'
+ CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'community.general.syslog_json'
CALLBACK_NEEDS_WHITELIST = True
diff --git a/plugins/callback/tasks_only.py b/plugins/callback/tasks_only.py
new file mode 100644
index 0000000000..3de81fc2db
--- /dev/null
+++ b/plugins/callback/tasks_only.py
@@ -0,0 +1,68 @@
+
+# Copyright (c) 2025, Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+author: Felix Fontein (@felixfontein)
+name: tasks_only
+type: stdout
+version_added: 11.1.0
+short_description: Only show tasks
+description:
+ - Removes play start and stats marker from P(ansible.builtin.default#callback)'s output.
+ - Can be used to generate output for documentation examples.
+ For this, the O(number_of_columns) option should be set to an explicit value.
+extends_documentation_fragment:
+ - ansible.builtin.default_callback
+ - ansible.builtin.result_format_callback
+options:
+ number_of_columns:
+ description:
+ - Sets the number of columns for Ansible's display.
+ type: int
+ env:
+ - name: ANSIBLE_COLLECTIONS_TASKS_ONLY_NUMBER_OF_COLUMNS
+ result_format:
+ # Part of the ansible.builtin.result_format_callback doc fragment
+ version_added: 11.2.0
+ pretty_results:
+ # Part of the ansible.builtin.result_format_callback doc fragment
+ version_added: 11.2.0
+"""
+
+EXAMPLES = r"""
+---
+# Enable callback in ansible.cfg:
+ansible_config: |-
+ [defaults]
+ stdout_callback = community.general.tasks_only
+
+---
+# Enable callback with environment variables:
+environment_variable: |-
+ ANSIBLE_STDOUT_CALLBACK=community.general.tasks_only
+"""
+
+from ansible.plugins.callback.default import CallbackModule as Default
+
+
+class CallbackModule(Default):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.tasks_only'
+
+ def v2_playbook_on_play_start(self, play):
+ pass
+
+ def v2_playbook_on_stats(self, stats):
+ pass
+
+ def set_options(self, *args, **kwargs):
+ result = super(CallbackModule, self).set_options(*args, **kwargs)
+ self.number_of_columns = self.get_option("number_of_columns")
+ if self.number_of_columns is not None:
+ self._display.columns = self.number_of_columns
+ return result
diff --git a/plugins/callback/timestamp.py b/plugins/callback/timestamp.py
new file mode 100644
index 0000000000..f733fa8cb7
--- /dev/null
+++ b/plugins/callback/timestamp.py
@@ -0,0 +1,124 @@
+
+# Copyright (c) 2024, kurokobo
+# Copyright (c) 2014, Michael DeHaan
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+name: timestamp
+type: stdout
+short_description: Adds simple timestamp for each header
+version_added: 9.0.0
+description:
+ - This callback adds simple timestamp for each header.
+author: kurokobo (@kurokobo)
+options:
+ timezone:
+ description:
+ - Timezone to use for the timestamp in IANA time zone format.
+ - For example V(America/New_York), V(Asia/Tokyo). Ignored on Python < 3.9.
+ ini:
+ - section: callback_timestamp
+ key: timezone
+ env:
+ - name: ANSIBLE_CALLBACK_TIMESTAMP_TIMEZONE
+ type: string
+ format_string:
+ description:
+ - Format of the timestamp shown to user in 1989 C standard format.
+ - Refer to L(the Python documentation,https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes)
+ for the available format codes.
+ ini:
+ - section: callback_timestamp
+ key: format_string
+ env:
+ - name: ANSIBLE_CALLBACK_TIMESTAMP_FORMAT_STRING
+ default: "%H:%M:%S"
+ type: string
+seealso:
+ - plugin: ansible.posix.profile_tasks
+ plugin_type: callback
+ description: >-
+ You can use P(ansible.posix.profile_tasks#callback) callback plugin to time individual tasks and overall execution time
+ with detailed timestamps.
+extends_documentation_fragment:
+ - ansible.builtin.default_callback
+ - ansible.builtin.result_format_callback
+"""
+
+
+from ansible.plugins.callback.default import CallbackModule as Default
+from ansible.utils.display import get_text_width
+from ansible.module_utils.common.text.converters import to_text
+from datetime import datetime
+import types
+import sys
+
+# Store whether the zoneinfo module is available
+_ZONEINFO_AVAILABLE = sys.version_info >= (3, 9)
+
+
+def get_datetime_now(tz):
+ """
+ Returns the current timestamp with the specified timezone
+ """
+ return datetime.now(tz=tz)
+
+
+def banner(self, msg, color=None, cows=True):
+ """
+ Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum) with trailing timestamp
+
+ Based on the banner method of Display class from ansible.utils.display
+
+ https://github.com/ansible/ansible/blob/4403519afe89138042108e237aef317fd5f09c33/lib/ansible/utils/display.py#L511
+ """
+ timestamp = get_datetime_now(self.timestamp_tzinfo).strftime(self.timestamp_format_string)
+ timestamp_len = get_text_width(timestamp) + 1 # +1 for leading space
+
+ msg = to_text(msg)
+ if self.b_cowsay and cows:
+ try:
+ self.banner_cowsay(f"{msg} @ {timestamp}")
+ return
+ except OSError:
+ self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
+
+ msg = msg.strip()
+ try:
+ star_len = self.columns - get_text_width(msg) - timestamp_len
+ except EnvironmentError:
+ star_len = self.columns - len(msg) - timestamp_len
+ if star_len <= 3:
+ star_len = 3
+ stars = "*" * star_len
+ self.display(f"\n{msg} {stars} {timestamp}", color=color)
+
+
+class CallbackModule(Default):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = "stdout"
+ CALLBACK_NAME = "community.general.timestamp"
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ # Replace the banner method of the display object with the custom one
+ self._display.banner = types.MethodType(banner, self._display)
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ # Store zoneinfo for specified timezone if available
+ tzinfo = None
+ if _ZONEINFO_AVAILABLE and self.get_option("timezone"):
+ from zoneinfo import ZoneInfo
+
+ tzinfo = ZoneInfo(self.get_option("timezone"))
+
+ # Inject options into the display object
+ setattr(self._display, "timestamp_tzinfo", tzinfo)
+ setattr(self._display, "timestamp_format_string", self.get_option("format_string"))
diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py
index fa26be8238..d155aefc66 100644
--- a/plugins/callback/unixy.py
+++ b/plugins/callback/unixy.py
@@ -1,25 +1,23 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017, Allyson Bowles <@akatch>
+# Copyright (c) 2023, Al Bowles <@akatch>
# Copyright (c) 2012-2014, Michael DeHaan
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: unixy
- type: stdout
- author: Allyson Bowles (@akatch)
- short_description: condensed Ansible output
- description:
- - Consolidated Ansible output in the style of LINUX/UNIX startup logs.
- extends_documentation_fragment:
- - default_callback
- requirements:
- - set as stdout in configuration
-'''
+DOCUMENTATION = r"""
+name: unixy
+type: stdout
+author: Al Bowles (@akatch)
+short_description: Condensed Ansible output
+description:
+ - Consolidated Ansible output in the style of LINUX/UNIX startup logs.
+extends_documentation_fragment:
+ - default_callback
+requirements:
+ - set as stdout in configuration
+"""
from os.path import basename
from ansible import constants as C
@@ -40,7 +38,6 @@ class CallbackModule(CallbackModule_default):
- Only display task names if the task runs on at least one host
- Add option to display all hostnames on a single line in the appropriate result color (failures may have a separate line)
- Consolidate stats display
- - Display whether run is in --check mode
- Don't show play name if no hosts found
'''
@@ -63,59 +60,71 @@ class CallbackModule(CallbackModule_default):
def _preprocess_result(self, result):
self.delegated_vars = result._result.get('_ansible_delegated_vars', None)
- self._handle_exception(result._result, use_stderr=self.display_failed_stderr)
+ self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr'))
self._handle_warnings(result._result)
def _process_result_output(self, result, msg):
task_host = result._host.get_name()
- task_result = "%s %s" % (task_host, msg)
+ task_result = f"{task_host} {msg}"
if self._run_is_verbose(result):
- task_result = "%s %s: %s" % (task_host, msg, self._dump_results(result._result, indent=4))
+ task_result = f"{task_host} {msg}: {self._dump_results(result._result, indent=4)}"
return task_result
if self.delegated_vars:
task_delegate_host = self.delegated_vars['ansible_host']
- task_result = "%s -> %s %s" % (task_host, task_delegate_host, msg)
+ task_result = f"{task_host} -> {task_delegate_host} {msg}"
if result._result.get('msg') and result._result.get('msg') != "All items completed":
- task_result += " | msg: " + to_text(result._result.get('msg'))
+ task_result += f" | msg: {to_text(result._result.get('msg'))}"
if result._result.get('stdout'):
- task_result += " | stdout: " + result._result.get('stdout')
+ task_result += f" | stdout: {result._result.get('stdout')}"
if result._result.get('stderr'):
- task_result += " | stderr: " + result._result.get('stderr')
+ task_result += f" | stderr: {result._result.get('stderr')}"
return task_result
def v2_playbook_on_task_start(self, task, is_conditional):
self._get_task_display_name(task)
if self.task_display_name is not None:
- self._display.display("%s..." % self.task_display_name)
+ if task.check_mode and self.get_option('check_mode_markers'):
+ self._display.display(f"{self.task_display_name} (check mode)...")
+ else:
+ self._display.display(f"{self.task_display_name}...")
def v2_playbook_on_handler_task_start(self, task):
self._get_task_display_name(task)
if self.task_display_name is not None:
- self._display.display("%s (via handler)... " % self.task_display_name)
+ if task.check_mode and self.get_option('check_mode_markers'):
+ self._display.display(f"{self.task_display_name} (via handler in check mode)... ")
+ else:
+ self._display.display(f"{self.task_display_name} (via handler)... ")
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
- if name and play.hosts:
- msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts))
+ if play.check_mode and self.get_option('check_mode_markers'):
+ if name and play.hosts:
+ msg = f"\n- {name} (in check mode) on hosts: {','.join(play.hosts)} -"
+ else:
+ msg = "- check mode -"
else:
- msg = u"---"
+ if name and play.hosts:
+ msg = f"\n- {name} on hosts: {','.join(play.hosts)} -"
+ else:
+ msg = "---"
self._display.display(msg)
def v2_runner_on_skipped(self, result, ignore_errors=False):
- if self.display_skipped_hosts:
+ if self.get_option('display_skipped_hosts'):
self._preprocess_result(result)
display_color = C.COLOR_SKIP
msg = "skipped"
task_result = self._process_result_output(result, msg)
- self._display.display(" " + task_result, display_color)
+ self._display.display(f" {task_result}", display_color)
else:
return
@@ -125,10 +134,10 @@ class CallbackModule(CallbackModule_default):
msg = "failed"
item_value = self._get_item_label(result._result)
if item_value:
- msg += " | item: %s" % (item_value,)
+ msg += f" | item: {item_value}"
task_result = self._process_result_output(result, msg)
- self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr)
+ self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr'))
def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK):
self._preprocess_result(result)
@@ -138,13 +147,13 @@ class CallbackModule(CallbackModule_default):
msg = "done"
item_value = self._get_item_label(result._result)
if item_value:
- msg += " | item: %s" % (item_value,)
+ msg += f" | item: {item_value}"
display_color = C.COLOR_CHANGED
task_result = self._process_result_output(result, msg)
- self._display.display(" " + task_result, display_color)
- elif self.display_ok_hosts:
+ self._display.display(f" {task_result}", display_color)
+ elif self.get_option('display_ok_hosts'):
task_result = self._process_result_output(result, msg)
- self._display.display(" " + task_result, display_color)
+ self._display.display(f" {task_result}", display_color)
def v2_runner_item_on_skipped(self, result):
self.v2_runner_on_skipped(result)
@@ -162,7 +171,7 @@ class CallbackModule(CallbackModule_default):
display_color = C.COLOR_UNREACHABLE
task_result = self._process_result_output(result, msg)
- self._display.display(" " + task_result, display_color, stderr=self.display_failed_stderr)
+ self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr'))
def v2_on_file_diff(self, result):
if result._task.loop and 'results' in result._result:
@@ -184,40 +193,34 @@ class CallbackModule(CallbackModule_default):
# TODO how else can we display these?
t = stats.summarize(h)
- self._display.display(u" %s : %s %s %s %s %s %s" % (
- hostcolor(h, t),
- colorize(u'ok', t['ok'], C.COLOR_OK),
- colorize(u'changed', t['changed'], C.COLOR_CHANGED),
- colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
- colorize(u'failed', t['failures'], C.COLOR_ERROR),
- colorize(u'rescued', t['rescued'], C.COLOR_OK),
- colorize(u'ignored', t['ignored'], C.COLOR_WARN)),
+ self._display.display(
+ f" {hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} {colorize('changed', t['changed'], C.COLOR_CHANGED)} "
+ f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} "
+ f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}",
screen_only=True
)
- self._display.display(u" %s : %s %s %s %s %s %s" % (
- hostcolor(h, t, False),
- colorize(u'ok', t['ok'], None),
- colorize(u'changed', t['changed'], None),
- colorize(u'unreachable', t['unreachable'], None),
- colorize(u'failed', t['failures'], None),
- colorize(u'rescued', t['rescued'], None),
- colorize(u'ignored', t['ignored'], None)),
+ self._display.display(
+ f" {hostcolor(h, t, False)} : {colorize('ok', t['ok'], None)} {colorize('changed', t['changed'], None)} "
+ f"{colorize('unreachable', t['unreachable'], None)} {colorize('failed', t['failures'], None)} {colorize('rescued', t['rescued'], None)} "
+ f"{colorize('ignored', t['ignored'], None)}",
log_only=True
)
- if stats.custom and self.show_custom_stats:
+ if stats.custom and self.get_option('show_custom_stats'):
self._display.banner("CUSTOM STATS: ")
# per host
# TODO: come up with 'pretty format'
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
- self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+ stat_val = self._dump_results(stats.custom[k], indent=1).replace('\n', '')
+ self._display.display(f'\t{k}: {stat_val}')
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
- self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ stat_val_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')
+ self._display.display(f'\tRUN: {stat_val_run}')
self._display.display("", screen_only=True)
def v2_playbook_on_no_hosts_matched(self):
@@ -227,22 +230,24 @@ class CallbackModule(CallbackModule_default):
self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR)
def v2_playbook_on_start(self, playbook):
- # TODO display whether this run is happening in check mode
- self._display.display("Executing playbook %s" % basename(playbook._file_name))
+ if context.CLIARGS['check'] and self.get_option('check_mode_markers'):
+ self._display.display(f"Executing playbook {basename(playbook._file_name)} in check mode")
+ else:
+ self._display.display(f"Executing playbook {basename(playbook._file_name)}")
# show CLI arguments
if self._display.verbosity > 3:
if context.CLIARGS.get('args'):
- self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']),
+ self._display.display(f"Positional arguments: {' '.join(context.CLIARGS['args'])}",
color=C.COLOR_VERBOSE, screen_only=True)
for argument in (a for a in context.CLIARGS if a != 'args'):
val = context.CLIARGS[argument]
if val:
- self._display.vvvv('%s: %s' % (argument, val))
+ self._display.vvvv(f'{argument}: {val}')
def v2_runner_retry(self, result):
- msg = " Retrying... (%d of %d)" % (result._result['attempts'], result._result['retries'])
+ msg = f" Retrying... ({result._result['attempts']} of {result._result['retries']})"
if self._run_is_verbose(result):
- msg += "Result was: %s" % self._dump_results(result._result)
+ msg += f"Result was: {self._dump_results(result._result)}"
self._display.display(msg, color=C.COLOR_DEBUG)
diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py
deleted file mode 100644
index 81d59e2e70..0000000000
--- a/plugins/callback/yaml.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: yaml
- type: stdout
- short_description: yaml-ized Ansible screen output
- description:
- - Ansible output that can be quite a bit easier to read than the
- default JSON formatting.
- extends_documentation_fragment:
- - default_callback
- requirements:
- - set as stdout in configuration
-'''
-
-import yaml
-import json
-import re
-import string
-import sys
-
-from ansible.module_utils.common.text.converters import to_bytes, to_text
-from ansible.module_utils.six import string_types
-from ansible.parsing.yaml.dumper import AnsibleDumper
-from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
-from ansible.plugins.callback.default import CallbackModule as Default
-
-
-# from http://stackoverflow.com/a/15423007/115478
-def should_use_block(value):
- """Returns true if string should be in block format"""
- for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
- if c in value:
- return True
- return False
-
-
-class MyDumper(AnsibleDumper):
- def represent_scalar(self, tag, value, style=None):
- """Uses block style for multi-line strings"""
- if style is None:
- if should_use_block(value):
- style = '|'
- # we care more about readable than accuracy, so...
- # ...no trailing space
- value = value.rstrip()
- # ...and non-printable characters
- value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
- # ...tabs prevent blocks from expanding
- value = value.expandtabs()
- # ...and odd bits of whitespace
- value = re.sub(r'[\x0b\x0c\r]', '', value)
- # ...as does trailing space
- value = re.sub(r' +\n', '\n', value)
- else:
- style = self.default_style
- node = yaml.representer.ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
-
-class CallbackModule(Default):
-
- """
- Variation of the Default output which uses nicely readable YAML instead
- of JSON for printing results.
- """
-
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'stdout'
- CALLBACK_NAME = 'community.general.yaml'
-
- def __init__(self):
- super(CallbackModule, self).__init__()
-
- def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
- if result.get('_ansible_no_log', False):
- return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result"))
-
- # All result keys stating with _ansible_ are internal, so remove them from the result before we output anything.
- abridged_result = strip_internal_keys(module_response_deepcopy(result))
-
- # remove invocation unless specifically wanting it
- if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
- del abridged_result['invocation']
-
- # remove diff information from screen output
- if self._display.verbosity < 3 and 'diff' in result:
- del abridged_result['diff']
-
- # remove exception from screen output
- if 'exception' in abridged_result:
- del abridged_result['exception']
-
- dumped = ''
-
- # put changed and skipped into a header line
- if 'changed' in abridged_result:
- dumped += 'changed=' + str(abridged_result['changed']).lower() + ' '
- del abridged_result['changed']
-
- if 'skipped' in abridged_result:
- dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' '
- del abridged_result['skipped']
-
- # if we already have stdout, we don't need stdout_lines
- if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
- abridged_result['stdout_lines'] = ''
-
- # if we already have stderr, we don't need stderr_lines
- if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
- abridged_result['stderr_lines'] = ''
-
- if abridged_result:
- dumped += '\n'
- dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False))
-
- # indent by a couple of spaces
- dumped = '\n '.join(dumped.split('\n')).rstrip()
- return dumped
-
- def _serialize_diff(self, diff):
- return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py
index cbbf9612e9..35f7312326 100644
--- a/plugins/connection/chroot.py
+++ b/plugins/connection/chroot.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Based on local.py (c) 2012, Michael DeHaan
#
# (c) 2013, Maykel Moya
@@ -7,56 +6,80 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Maykel Moya (!UNKNOWN)
- name: chroot
- short_description: Interact with local chroot
+DOCUMENTATION = r"""
+author: Maykel Moya (!UNKNOWN)
+name: chroot
+short_description: Interact with local chroot
+description:
+ - Run commands or put/fetch files to an existing chroot on the Ansible controller.
+options:
+ remote_addr:
description:
- - Run commands or put/fetch files to an existing chroot on the Ansible controller.
- options:
- remote_addr:
- description:
- - The path of the chroot you want to access.
- default: inventory_hostname
- vars:
- - name: ansible_host
- executable:
- description:
- - User specified executable shell
- ini:
- - section: defaults
- key: executable
- env:
- - name: ANSIBLE_EXECUTABLE
- vars:
- - name: ansible_executable
- default: /bin/sh
- chroot_exe:
- description:
- - User specified chroot binary
- ini:
- - section: chroot_connection
- key: exe
- env:
- - name: ANSIBLE_CHROOT_EXE
- vars:
- - name: ansible_chroot_exe
- default: chroot
-'''
+ - The path of the chroot you want to access.
+ type: string
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ executable:
+ description:
+ - User specified executable shell.
+ type: string
+ ini:
+ - section: defaults
+ key: executable
+ env:
+ - name: ANSIBLE_EXECUTABLE
+ vars:
+ - name: ansible_executable
+ default: /bin/sh
+ chroot_exe:
+ description:
+ - User specified chroot binary.
+ type: string
+ ini:
+ - section: chroot_connection
+ key: exe
+ env:
+ - name: ANSIBLE_CHROOT_EXE
+ vars:
+ - name: ansible_chroot_exe
+ default: chroot
+ disable_root_check:
+ description:
+ - Do not check that the user is not root.
+ ini:
+ - section: chroot_connection
+ key: disable_root_check
+ env:
+ - name: ANSIBLE_CHROOT_DISABLE_ROOT_CHECK
+ vars:
+ - name: ansible_chroot_disable_root_check
+ default: false
+ type: bool
+ version_added: 7.3.0
+"""
+
+EXAMPLES = r"""
+- hosts: chroots
+ connection: community.general.chroot
+ tasks:
+ - debug:
+ msg: "This is coming from chroot environment"
+"""
import os
import os.path
import subprocess
import traceback
+from shlex import quote as shlex_quote
from ansible.errors import AnsibleError
from ansible.module_utils.basic import is_executable
from ansible.module_utils.common.process import get_bin_path
-from ansible.module_utils.six.moves import shlex_quote
-from ansible.module_utils.common.text.converters import to_bytes, to_native
+from ansible.module_utils.common.text.converters import to_bytes
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display
@@ -80,31 +103,32 @@ class Connection(ConnectionBase):
self.chroot = self._play_context.remote_addr
- if os.geteuid() != 0:
- raise AnsibleError("chroot connection requires running as root")
-
- # we're running as root on the local system so do some
- # trivial checks for ensuring 'host' is actually a chroot'able dir
+ # do some trivial checks for ensuring 'host' is actually a chroot'able dir
if not os.path.isdir(self.chroot):
- raise AnsibleError("%s is not a directory" % self.chroot)
+ raise AnsibleError(f"{self.chroot} is not a directory")
chrootsh = os.path.join(self.chroot, 'bin/sh')
# Want to check for a usable bourne shell inside the chroot.
# is_executable() == True is sufficient. For symlinks it
# gets really complicated really fast. So we punt on finding that
- # out. As long as it's a symlink we assume that it will work
+ # out. As long as it is a symlink we assume that it will work
if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
- raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
+ raise AnsibleError(f"{self.chroot} does not look like a chrootable dir (/bin/sh missing)")
def _connect(self):
""" connect to the chroot """
+ if not self.get_option('disable_root_check') and os.geteuid() != 0:
+ raise AnsibleError(
+ "chroot connection requires running as root. "
+ "You can override this check with the `disable_root_check` option.")
+
if os.path.isabs(self.get_option('chroot_exe')):
self.chroot_cmd = self.get_option('chroot_exe')
else:
try:
self.chroot_cmd = get_bin_path(self.get_option('chroot_exe'))
except ValueError as e:
- raise AnsibleError(to_native(e))
+ raise AnsibleError(str(e))
super(Connection, self)._connect()
if not self._connected:
@@ -122,7 +146,7 @@ class Connection(ConnectionBase):
executable = self.get_option('executable')
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
- display.vvv("EXEC %s" % local_cmd, host=self.chroot)
+ display.vvv(f"EXEC {local_cmd}", host=self.chroot)
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -147,7 +171,7 @@ class Connection(ConnectionBase):
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
- Can revisit using $HOME instead if it's a problem
+ Can revisit using $HOME instead if it is a problem
"""
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
@@ -156,7 +180,7 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
""" transfer a file from local to chroot """
super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self.chroot)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
@@ -166,27 +190,27 @@ class Connection(ConnectionBase):
else:
count = ''
try:
- p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file)
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
try:
stdout, stderr = p.communicate()
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}")
except IOError:
- raise AnsibleError("file or module does not exist at: %s" % in_path)
+ raise AnsibleError(f"file or module does not exist at: {in_path}")
def fetch_file(self, in_path, out_path):
""" fetch a file from chroot to local """
super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
+ display.vvv(f"FETCH {in_path} TO {out_path}", host=self.chroot)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
- p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}')
except OSError:
raise AnsibleError("chroot connection requires dd command in the chroot")
@@ -198,10 +222,10 @@ class Connection(ConnectionBase):
chunk = p.stdout.read(BUFSIZE)
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
stdout, stderr = p.communicate()
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}")
def close(self):
""" terminate the connection; nothing to do here """
diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py
index 9f37f791de..86d050c1db 100644
--- a/plugins/connection/funcd.py
+++ b/plugins/connection/funcd.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Based on local.py (c) 2012, Michael DeHaan
# Based on chroot.py (c) 2013, Maykel Moya
# Copyright (c) 2013, Michael Scherer
@@ -6,26 +5,26 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Michael Scherer (@mscherer)
- name: funcd
- short_description: Use funcd to connect to target
+DOCUMENTATION = r"""
+author: Michael Scherer (@mscherer)
+name: funcd
+short_description: Use funcd to connect to target
+description:
+ - This transport permits you to use Ansible over Func.
+  - For people who have already set up func and wish to play with Ansible, this permits moving gradually to Ansible without
+    having to completely redo the network setup.
+options:
+ remote_addr:
description:
- - This transport permits you to use Ansible over Func.
- - For people who have already setup func and that wish to play with ansible,
- this permit to move gradually to ansible without having to redo completely the setup of the network.
- options:
- remote_addr:
- description:
- - The path of the chroot you want to access.
- default: inventory_hostname
- vars:
- - name: ansible_host
- - name: ansible_func_host
-'''
+ - The path of the chroot you want to access.
+ type: string
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_func_host
+"""
HAVE_FUNC = False
try:
@@ -70,8 +69,8 @@ class Connection(ConnectionBase):
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
- # totally ignores privlege escalation
- display.vvv("EXEC %s" % cmd, host=self.host)
+ # totally ignores privilege escalation
+ display.vvv(f"EXEC {cmd}", host=self.host)
p = self.client.command.run(cmd)[self.host]
return p[0], p[1], p[2]
@@ -86,14 +85,14 @@ class Connection(ConnectionBase):
""" transfer a file from local to remote """
out_path = self._normalize_path(out_path, '/')
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self.host)
self.client.local.copyfile.send(in_path, out_path)
def fetch_file(self, in_path, out_path):
""" fetch a file from remote to local """
in_path = self._normalize_path(in_path, '/')
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+ display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host)
# need to use a tmp dir due to difference of semantic for getfile
# ( who take a # directory as destination) and fetch_file, who
# take a file directly
diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py
new file mode 100644
index 0000000000..3dfd37764b
--- /dev/null
+++ b/plugins/connection/incus.py
@@ -0,0 +1,274 @@
+# Based on lxd.py (c) 2016, Matt Clay
+# (c) 2023, Stephane Graber
+# Copyright (c) 2023 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+author: Stéphane Graber (@stgraber)
+name: incus
+short_description: Run tasks in Incus instances using the Incus CLI
+description:
+ - Run commands or put/fetch files to an existing Incus instance using Incus CLI.
+version_added: "8.2.0"
+options:
+ remote_addr:
+ description:
+ - The instance identifier.
+ type: string
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_incus_host
+ executable:
+ description:
+ - The shell to use for execution inside the instance.
+ type: string
+ default: /bin/sh
+ vars:
+ - name: ansible_executable
+ - name: ansible_incus_executable
+ incus_become_method:
+ description:
+ - Become command used to switch to a non-root user.
+ - Is only used when O(remote_user) is not V(root).
+    type: string
+ default: /bin/su
+ vars:
+ - name: incus_become_method
+ version_added: 10.4.0
+ remote:
+ description:
+ - The name of the Incus remote to use (per C(incus remote list)).
+ - Remotes are used to access multiple servers from a single client.
+ type: string
+ default: local
+ vars:
+ - name: ansible_incus_remote
+ remote_user:
+ description:
+ - User to login/authenticate as.
+ - Can be set from the CLI with the C(--user) or C(-u) options.
+ type: string
+ default: root
+ vars:
+ - name: ansible_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ ini:
+ - section: defaults
+ key: remote_user
+ keyword:
+ - name: remote_user
+ version_added: 10.4.0
+ project:
+ description:
+ - The name of the Incus project to use (per C(incus project list)).
+ - Projects are used to divide the instances running on a server.
+ type: string
+ default: default
+ vars:
+ - name: ansible_incus_project
+"""
+
+import os
+from subprocess import call, Popen, PIPE
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ """ Incus based connections """
+
+ transport = "incus"
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self._incus_cmd = get_bin_path("incus")
+
+ if not self._incus_cmd:
+ raise AnsibleError("incus command not found in PATH")
+
+ def _connect(self):
+ """connect to Incus (nothing to do here) """
+ super(Connection, self)._connect()
+
+ if not self._connected:
+ self._display.vvv(f"ESTABLISH Incus CONNECTION FOR USER: {self.get_option('remote_user')}",
+ host=self._instance())
+ self._connected = True
+
+ def _build_command(self, cmd) -> str:
+ """build the command to execute on the incus host"""
+
+ exec_cmd = [
+ self._incus_cmd,
+ "--project", self.get_option("project"),
+ "exec",
+ f"{self.get_option('remote')}:{self._instance()}",
+ "--"]
+
+ if self.get_option("remote_user") != "root":
+ self._display.vvv(
+ f"INFO: Running as non-root user: {self.get_option('remote_user')}, \
+ trying to run 'incus exec' with become method: {self.get_option('incus_become_method')}",
+ host=self._instance(),
+ )
+ exec_cmd.extend(
+ [self.get_option("incus_become_method"), self.get_option("remote_user"), "-c"]
+ )
+
+ exec_cmd.extend([self.get_option("executable"), "-c", cmd])
+
+ return exec_cmd
+
+ def _instance(self):
+ # Return only the leading part of the FQDN as the instance name
+ # as Incus instance names cannot be a FQDN.
+ return self.get_option('remote_addr').split(".")[0]
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ """ execute a command on the Incus host """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ self._display.vvv(f"EXEC {cmd}",
+ host=self._instance())
+
+ local_cmd = self._build_command(cmd)
+ self._display.vvvvv(f"EXEC {local_cmd}", host=self._instance())
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
+
+ process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ stdout, stderr = process.communicate(in_data)
+
+ stdout = to_text(stdout)
+ stderr = to_text(stderr)
+
+ if stderr.startswith("Error: ") and stderr.rstrip().endswith(
+ ": Instance is not running"
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance not running: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
+
+ if stderr.startswith("Error: ") and stderr.rstrip().endswith(
+ ": Instance not found"
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance not found: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
+
+ if (
+ stderr.startswith("Error: ")
+ and ": User does not have permission " in stderr
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
+
+ if (
+ stderr.startswith("Error: ")
+ and ": User does not have entitlement " in stderr
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
+
+ return process.returncode, stdout, stderr
+
+ def _get_remote_uid_gid(self) -> tuple[int, int]:
+ """Get the user and group ID of 'remote_user' from the instance."""
+
+ rc, uid_out, err = self.exec_command("/bin/id -u")
+ if rc != 0:
+ raise AnsibleError(
+ f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}"
+ )
+ uid = uid_out.strip()
+
+ rc, gid_out, err = self.exec_command("/bin/id -g")
+ if rc != 0:
+ raise AnsibleError(
+ f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}"
+ )
+ gid = gid_out.strip()
+
+ return int(uid), int(gid)
+
+ def put_file(self, in_path, out_path):
+ """ put a file from local to Incus """
+ super(Connection, self).put_file(in_path, out_path)
+
+ self._display.vvv(f"PUT {in_path} TO {out_path}",
+ host=self._instance())
+
+ if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound(f"input path is not a file: {in_path}")
+
+ if self.get_option("remote_user") != "root":
+ uid, gid = self._get_remote_uid_gid()
+ local_cmd = [
+ self._incus_cmd,
+ "--project",
+ self.get_option("project"),
+ "file",
+ "push",
+ "--uid",
+ str(uid),
+ "--gid",
+ str(gid),
+ "--quiet",
+ in_path,
+ f"{self.get_option('remote')}:{self._instance()}/{out_path}",
+ ]
+ else:
+ local_cmd = [
+ self._incus_cmd,
+ "--project",
+ self.get_option("project"),
+ "file",
+ "push",
+ "--quiet",
+ in_path,
+ f"{self.get_option('remote')}:{self._instance()}/{out_path}",
+ ]
+
+ self._display.vvvvv(f"PUT {local_cmd}", host=self._instance())
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ call(local_cmd)
+
+ def fetch_file(self, in_path, out_path):
+ """ fetch a file from Incus to local """
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ self._display.vvv(f"FETCH {in_path} TO {out_path}",
+ host=self._instance())
+
+ local_cmd = [
+ self._incus_cmd,
+ "--project", self.get_option("project"),
+ "file", "pull", "--quiet",
+ f"{self.get_option('remote')}:{self._instance()}/{in_path}",
+ out_path]
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ call(local_cmd)
+
+    def close(self):
+        """Mark the connection closed; no transport teardown is required."""
+        super(Connection, self).close()
+
+        self._connected = False
diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py
index 2e2a6f0937..fa4973bae1 100644
--- a/plugins/connection/iocage.py
+++ b/plugins/connection/iocage.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Based on jail.py
# (c) 2013, Michael Scherer
# (c) 2015, Toshio Kuratomi
@@ -7,29 +6,30 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Stephan Lohse (!UNKNOWN)
- name: iocage
- short_description: Run tasks in iocage jails
+DOCUMENTATION = r"""
+author: Stephan Lohse (!UNKNOWN)
+name: iocage
+short_description: Run tasks in iocage jails
+description:
+ - Run commands or put/fetch files to an existing iocage jail.
+options:
+ remote_addr:
description:
- - Run commands or put/fetch files to an existing iocage jail
- options:
- remote_addr:
- description:
- - Path to the jail
- vars:
- - name: ansible_host
- - name: ansible_iocage_host
- remote_user:
- description:
- - User to execute as inside the jail
- vars:
- - name: ansible_user
- - name: ansible_iocage_user
-'''
+ - Path to the jail.
+ type: string
+ vars:
+ - name: ansible_host
+ - name: ansible_iocage_host
+ remote_user:
+ description:
+ - User to execute as inside the jail.
+ type: string
+ vars:
+ - name: ansible_user
+ - name: ansible_iocage_user
+"""
import subprocess
@@ -53,11 +53,12 @@ class Connection(Jail):
jail_uuid = self.get_jail_uuid()
- kwargs[Jail.modified_jailname_key] = 'ioc-{0}'.format(jail_uuid)
+ kwargs[Jail.modified_jailname_key] = f'ioc-{jail_uuid}'
- display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format(
- iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]),
- host=kwargs[Jail.modified_jailname_key])
+ display.vvv(
+ f"Jail {self.ioc_jail} has been translated to {kwargs[Jail.modified_jailname_key]}",
+ host=kwargs[Jail.modified_jailname_key]
+ )
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
@@ -79,6 +80,6 @@ class Connection(Jail):
p.wait()
if p.returncode != 0:
- raise AnsibleError(u"iocage returned an error: {0}".format(stdout))
+ raise AnsibleError(f"iocage returned an error: {stdout}")
return stdout.strip('\n')
diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py
index d813780136..7f25c3fe01 100644
--- a/plugins/connection/jail.py
+++ b/plugins/connection/jail.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Based on local.py by Michael DeHaan
# and chroot.py by Maykel Moya
# Copyright (c) 2013, Michael Scherer
@@ -7,38 +6,40 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Ansible Core Team
- name: jail
- short_description: Run tasks in jails
+DOCUMENTATION = r"""
+author: Ansible Core Team
+name: jail
+short_description: Run tasks in jails
+description:
+ - Run commands or put/fetch files to an existing jail.
+options:
+ remote_addr:
description:
- - Run commands or put/fetch files to an existing jail
- options:
- remote_addr:
- description:
- - Path to the jail
- default: inventory_hostname
- vars:
- - name: ansible_host
- - name: ansible_jail_host
- remote_user:
- description:
- - User to execute as inside the jail
- vars:
- - name: ansible_user
- - name: ansible_jail_user
-'''
+ - Path to the jail.
+ type: string
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_jail_host
+ remote_user:
+ description:
+ - User to execute as inside the jail.
+ type: string
+ vars:
+ - name: ansible_user
+ - name: ansible_jail_user
+"""
import os
import os.path
import subprocess
import traceback
+from shlex import quote as shlex_quote
from ansible.errors import AnsibleError
-from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE
@@ -72,14 +73,14 @@ class Connection(ConnectionBase):
self.jexec_cmd = self._search_executable('jexec')
if self.jail not in self.list_jails():
- raise AnsibleError("incorrect jail name %s" % self.jail)
+ raise AnsibleError(f"incorrect jail name {self.jail}")
@staticmethod
def _search_executable(executable):
try:
return get_bin_path(executable)
except ValueError:
- raise AnsibleError("%s command not found in PATH" % executable)
+ raise AnsibleError(f"{executable} command not found in PATH")
def list_jails(self):
p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
@@ -94,7 +95,7 @@ class Connection(ConnectionBase):
""" connect to the jail; nothing to do here """
super(Connection, self)._connect()
if not self._connected:
- display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
+ display.vvv(f"ESTABLISH JAIL CONNECTION FOR USER: {self._play_context.remote_user}", host=self.jail)
self._connected = True
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
@@ -112,11 +113,11 @@ class Connection(ConnectionBase):
if self._play_context.remote_user is not None:
local_cmd += ['-U', self._play_context.remote_user]
# update HOME since -U does not update the jail environment
- set_env = 'HOME=~' + self._play_context.remote_user + ' '
+ set_env = f"HOME=~{self._play_context.remote_user} "
local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd]
- display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
+ display.vvv(f"EXEC {local_cmd}", host=self.jail)
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -141,7 +142,7 @@ class Connection(ConnectionBase):
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
- Can revisit using $HOME instead if it's a problem
+ Can revisit using $HOME instead if it is a problem
"""
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
@@ -150,7 +151,7 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
""" transfer a file from local to jail """
super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self.jail)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
@@ -160,27 +161,27 @@ class Connection(ConnectionBase):
else:
count = ''
try:
- p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file)
except OSError:
raise AnsibleError("jail connection requires dd command in the jail")
try:
stdout, stderr = p.communicate()
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}")
except IOError:
- raise AnsibleError("file or module does not exist at: %s" % in_path)
+ raise AnsibleError(f"file or module does not exist at: {in_path}")
def fetch_file(self, in_path, out_path):
""" fetch a file from jail to local """
super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
+ display.vvv(f"FETCH {in_path} TO {out_path}", host=self.jail)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
- p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}')
except OSError:
raise AnsibleError("jail connection requires dd command in the jail")
@@ -192,10 +193,10 @@ class Connection(ConnectionBase):
chunk = p.stdout.read(BUFSIZE)
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
stdout, stderr = p.communicate()
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}")
def close(self):
""" terminate the connection; nothing to do here """
diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py
index adf3eec1c1..e8e28ed804 100644
--- a/plugins/connection/lxc.py
+++ b/plugins/connection/lxc.py
@@ -1,34 +1,35 @@
-# -*- coding: utf-8 -*-
# (c) 2015, Joerg Thalheim
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Joerg Thalheim (!UNKNOWN)
- name: lxc
- short_description: Run tasks in lxc containers via lxc python library
+DOCUMENTATION = r"""
+author: Joerg Thalheim (!UNKNOWN)
+name: lxc
+short_description: Run tasks in LXC containers using lxc python library
+description:
+ - Run commands or put/fetch files to an existing LXC container using lxc python library.
+options:
+ remote_addr:
description:
- - Run commands or put/fetch files to an existing lxc container using lxc python library
- options:
- remote_addr:
- description:
- - Container identifier
- default: inventory_hostname
- vars:
- - name: ansible_host
- - name: ansible_lxc_host
- executable:
- default: /bin/sh
- description:
- - Shell executable
- vars:
- - name: ansible_executable
- - name: ansible_lxc_executable
-'''
+ - Container identifier.
+ type: string
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_lxc_host
+ executable:
+ default: /bin/sh
+ description:
+ - Shell executable.
+ type: string
+ vars:
+ - name: ansible_executable
+ - name: ansible_lxc_executable
+"""
import os
import shutil
@@ -59,7 +60,7 @@ class Connection(ConnectionBase):
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
- self.container_name = self._play_context.remote_addr
+ self.container_name = None
self.container = None
def _connect(self):
@@ -67,16 +68,19 @@ class Connection(ConnectionBase):
super(Connection, self)._connect()
if not HAS_LIBLXC:
- msg = "lxc bindings for python2 are not installed"
+ msg = "lxc python bindings are not installed"
raise errors.AnsibleError(msg)
- if self.container:
+ container_name = self.get_option('remote_addr')
+ if self.container and self.container_name == container_name:
return
+ self.container_name = container_name
+
self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
self.container = _lxc.Container(self.container_name)
if self.container.state == "STOPPED":
- raise errors.AnsibleError("%s is not running" % self.container_name)
+ raise errors.AnsibleError(f"{self.container_name} is not running")
@staticmethod
def _communicate(pid, in_data, stdin, stdout, stderr):
@@ -117,7 +121,7 @@ class Connection(ConnectionBase):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
# python2-lxc needs bytes. python3-lxc needs text.
- executable = to_native(self._play_context.executable, errors='surrogate_or_strict')
+ executable = to_native(self.get_option('executable'), errors='surrogate_or_strict')
local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')]
read_stdout, write_stdout = None, None
@@ -138,10 +142,10 @@ class Connection(ConnectionBase):
read_stdin, write_stdin = os.pipe()
kwargs['stdin'] = self._set_nonblocking(read_stdin)
- self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name)
+ self._display.vvv(f"EXEC {local_cmd}", host=self.container_name)
pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs)
if pid == -1:
- msg = "failed to attach to container %s" % self.container_name
+ msg = f"failed to attach to container {self.container_name}"
raise errors.AnsibleError(msg)
write_stdout = os.close(write_stdout)
@@ -168,18 +172,18 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
''' transfer a file from local to lxc '''
super(Connection, self).put_file(in_path, out_path)
- self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name)
+ self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.container_name)
in_path = to_bytes(in_path, errors='surrogate_or_strict')
out_path = to_bytes(out_path, errors='surrogate_or_strict')
if not os.path.exists(in_path):
- msg = "file or module does not exist: %s" % in_path
+ msg = f"file or module does not exist: {in_path}"
raise errors.AnsibleFileNotFound(msg)
try:
src_file = open(in_path, "rb")
except IOError:
traceback.print_exc()
- raise errors.AnsibleError("failed to open input file to %s" % in_path)
+ raise errors.AnsibleError(f"failed to open input file to {in_path}")
try:
def write_file(args):
with open(out_path, 'wb+') as dst_file:
@@ -188,7 +192,7 @@ class Connection(ConnectionBase):
self.container.attach_wait(write_file, None)
except IOError:
traceback.print_exc()
- msg = "failed to transfer file to %s" % out_path
+ msg = f"failed to transfer file to {out_path}"
raise errors.AnsibleError(msg)
finally:
src_file.close()
@@ -196,7 +200,7 @@ class Connection(ConnectionBase):
def fetch_file(self, in_path, out_path):
''' fetch a file from lxc to local '''
super(Connection, self).fetch_file(in_path, out_path)
- self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name)
+ self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.container_name)
in_path = to_bytes(in_path, errors='surrogate_or_strict')
out_path = to_bytes(out_path, errors='surrogate_or_strict')
@@ -204,7 +208,7 @@ class Connection(ConnectionBase):
dst_file = open(out_path, "wb")
except IOError:
traceback.print_exc()
- msg = "failed to open output file %s" % out_path
+ msg = f"failed to open output file {out_path}"
raise errors.AnsibleError(msg)
try:
def write_file(args):
@@ -219,7 +223,7 @@ class Connection(ConnectionBase):
self.container.attach_wait(write_file, None)
except IOError:
traceback.print_exc()
- msg = "failed to transfer file from %s to %s" % (in_path, out_path)
+ msg = f"failed to transfer file from {in_path} to {out_path}"
raise errors.AnsibleError(msg)
finally:
dst_file.close()
diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py
index affb87dfd0..d4d3b45d0a 100644
--- a/plugins/connection/lxd.py
+++ b/plugins/connection/lxd.py
@@ -1,48 +1,77 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2016 Matt Clay
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Matt Clay (@mattclay)
- name: lxd
- short_description: Run tasks in lxc containers via lxc CLI
+DOCUMENTATION = r"""
+author: Matt Clay (@mattclay)
+name: lxd
+short_description: Run tasks in LXD instances using C(lxc) CLI
+description:
+ - Run commands or put/fetch files to an existing instance using C(lxc) CLI.
+options:
+ remote_addr:
description:
- - Run commands or put/fetch files to an existing lxc container using lxc CLI
- options:
- remote_addr:
- description:
- - Container identifier.
- default: inventory_hostname
- vars:
- - name: inventory_hostname
- - name: ansible_host
- - name: ansible_lxd_host
- executable:
- description:
- - shell to use for execution inside container
- default: /bin/sh
- vars:
- - name: ansible_executable
- - name: ansible_lxd_executable
- remote:
- description:
- - Name of the LXD remote to use.
- default: local
- vars:
- - name: ansible_lxd_remote
- version_added: 2.0.0
- project:
- description:
- - Name of the LXD project to use.
- vars:
- - name: ansible_lxd_project
- version_added: 2.0.0
-'''
+ - Instance (container/VM) identifier.
+ - Since community.general 8.0.0, a FQDN can be provided; in that case, the first component (the part before C(.)) is
+ used as the instance identifier.
+ type: string
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_lxd_host
+ executable:
+ description:
+ - Shell to use for execution inside instance.
+ type: string
+ default: /bin/sh
+ vars:
+ - name: ansible_executable
+ - name: ansible_lxd_executable
+ lxd_become_method:
+ description:
+ - Become command used to switch to a non-root user.
+ - Is only used when O(remote_user) is not V(root).
+ type: str
+ default: /bin/su
+ vars:
+ - name: lxd_become_method
+ version_added: 10.4.0
+ remote:
+ description:
+ - Name of the LXD remote to use.
+ type: string
+ default: local
+ vars:
+ - name: ansible_lxd_remote
+ version_added: 2.0.0
+ remote_user:
+ description:
+ - User to login/authenticate as.
+ - Can be set from the CLI with the C(--user) or C(-u) options.
+ type: string
+ default: root
+ vars:
+ - name: ansible_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ ini:
+ - section: defaults
+ key: remote_user
+ keyword:
+ - name: remote_user
+ version_added: 10.4.0
+ project:
+ description:
+ - Name of the LXD project to use.
+ type: string
+ vars:
+ - name: ansible_lxd_project
+ version_added: 2.0.0
+"""
import os
from subprocess import Popen, PIPE
@@ -58,7 +87,6 @@ class Connection(ConnectionBase):
transport = 'community.general.lxd'
has_pipelining = True
- default_user = 'root'
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
@@ -68,32 +96,50 @@ class Connection(ConnectionBase):
except ValueError:
raise AnsibleError("lxc command not found in PATH")
- if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
- self._display.warning('lxd does not support remote_user, using container default: root')
+ def _host(self):
+ """ translate remote_addr to lxd (short) hostname """
+ return self.get_option("remote_addr").split(".", 1)[0]
def _connect(self):
"""connect to lxd (nothing to do here) """
super(Connection, self)._connect()
if not self._connected:
- self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self.get_option('remote_addr'))
+ self._display.vvv(f"ESTABLISH LXD CONNECTION FOR USER: {self.get_option('remote_user')}", host=self._host())
self._connected = True
+ def _build_command(self, cmd) -> list[str]:
+ """build the command to execute on the lxd host"""
+
+ exec_cmd = [self._lxc_cmd]
+
+ if self.get_option("project"):
+ exec_cmd.extend(["--project", self.get_option("project")])
+
+ exec_cmd.extend(["exec", f"{self.get_option('remote')}:{self._host()}", "--"])
+
+ if self.get_option("remote_user") != "root":
+ self._display.vvv(
+ f"INFO: Running as non-root user: {self.get_option('remote_user')}, "
+ f"trying to run 'lxc exec' with become method: {self.get_option('lxd_become_method')}",
+ host=self._host(),
+ )
+ exec_cmd.extend(
+ [self.get_option("lxd_become_method"), self.get_option("remote_user"), "-c"]
+ )
+
+ exec_cmd.extend([self.get_option("executable"), "-c", cmd])
+
+ return exec_cmd
+
def exec_command(self, cmd, in_data=None, sudoable=True):
""" execute a command on the lxd host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- self._display.vvv(u"EXEC {0}".format(cmd), host=self.get_option('remote_addr'))
+ self._display.vvv(f"EXEC {cmd}", host=self._host())
- local_cmd = [self._lxc_cmd]
- if self.get_option("project"):
- local_cmd.extend(["--project", self.get_option("project")])
- local_cmd.extend([
- "exec",
- "%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")),
- "--",
- self.get_option("executable"), "-c", cmd
- ])
+ local_cmd = self._build_command(cmd)
+ self._display.vvvvv(f"EXEC {local_cmd}", host=self._host())
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
@@ -104,31 +150,73 @@ class Connection(ConnectionBase):
stdout = to_text(stdout)
stderr = to_text(stderr)
- if stderr == "error: Container is not running.\n":
- raise AnsibleConnectionFailure("container not running: %s" % self.get_option('remote_addr'))
+ self._display.vvvvv(f"EXEC lxc output: {stdout} {stderr}", host=self._host())
- if stderr == "error: not found\n":
- raise AnsibleConnectionFailure("container not found: %s" % self.get_option('remote_addr'))
+ if "is not running" in stderr:
+ raise AnsibleConnectionFailure(f"instance not running: {self._host()}")
+
+ if stderr.strip() == "Error: Instance not found" or stderr.strip() == "error: not found":
+ raise AnsibleConnectionFailure(f"instance not found: {self._host()}")
return process.returncode, stdout, stderr
+ def _get_remote_uid_gid(self) -> tuple[int, int]:
+ """Get the user and group ID of 'remote_user' from the instance."""
+
+ rc, uid_out, err = self.exec_command("/bin/id -u")
+ if rc != 0:
+ raise AnsibleError(
+ f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}"
+ )
+ uid = uid_out.strip()
+
+ rc, gid_out, err = self.exec_command("/bin/id -g")
+ if rc != 0:
+ raise AnsibleError(
+ f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}"
+ )
+ gid = gid_out.strip()
+
+ return int(uid), int(gid)
+
def put_file(self, in_path, out_path):
""" put a file from local to lxd """
super(Connection, self).put_file(in_path, out_path)
- self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.get_option('remote_addr'))
+ self._display.vvv(f"PUT {in_path} TO {out_path}", host=self._host())
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
- raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
+ raise AnsibleFileNotFound(f"input path is not a file: {in_path}")
local_cmd = [self._lxc_cmd]
if self.get_option("project"):
local_cmd.extend(["--project", self.get_option("project")])
- local_cmd.extend([
- "file", "push",
- in_path,
- "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path)
- ])
+
+ if self.get_option("remote_user") != "root":
+ uid, gid = self._get_remote_uid_gid()
+ local_cmd.extend(
+ [
+ "file",
+ "push",
+ "--uid",
+ str(uid),
+ "--gid",
+ str(gid),
+ in_path,
+ f"{self.get_option('remote')}:{self._host()}/{out_path}",
+ ]
+ )
+ else:
+ local_cmd.extend(
+ [
+ "file",
+ "push",
+ in_path,
+ f"{self.get_option('remote')}:{self._host()}/{out_path}",
+ ]
+ )
+
+ self._display.vvvvv(f"PUT {local_cmd}", host=self._host())
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
@@ -139,14 +227,14 @@ class Connection(ConnectionBase):
""" fetch a file from lxd to local """
super(Connection, self).fetch_file(in_path, out_path)
- self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.get_option('remote_addr'))
+ self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self._host())
local_cmd = [self._lxc_cmd]
if self.get_option("project"):
local_cmd.extend(["--project", self.get_option("project")])
local_cmd.extend([
"file", "pull",
- "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path),
+ f"{self.get_option('remote')}:{self._host()}/{in_path}",
out_path
])
diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py
index 25594e952b..8d69594b22 100644
--- a/plugins/connection/qubes.py
+++ b/plugins/connection/qubes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Based on the buildah connection plugin
# Copyright (c) 2017 Ansible Project
# 2018 Kushal Das
@@ -8,36 +7,36 @@
#
# Written by: Kushal Das (https://github.com/kushaldas)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: qubes
- short_description: Interact with an existing QubesOS AppVM
+DOCUMENTATION = r"""
+name: qubes
+short_description: Interact with an existing QubesOS AppVM
+description:
+ - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools.
+author: Kushal Das (@kushaldas)
+
+
+options:
+ remote_addr:
description:
- - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools.
-
- author: Kushal Das (@kushaldas)
-
-
- options:
- remote_addr:
- description:
- - vm name
- default: inventory_hostname
- vars:
- - name: ansible_host
- remote_user:
- description:
- - The user to execute as inside the vm.
- default: The *user* account as default in Qubes OS.
- vars:
- - name: ansible_user
+ - VM name.
+ type: string
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ remote_user:
+ description:
+ - The user to execute as inside the VM.
+ type: string
+ default: The I(user) account as default in Qubes OS.
+ vars:
+ - name: ansible_user
# keyword:
# - name: hosts
-'''
+"""
import subprocess
@@ -76,7 +75,7 @@ class Connection(ConnectionBase):
"""
display.vvvv("CMD: ", cmd)
if not cmd.endswith("\n"):
- cmd = cmd + "\n"
+ cmd = f"{cmd}\n"
local_cmd = []
# For dom0
@@ -93,7 +92,7 @@ class Connection(ConnectionBase):
display.vvvv("Local cmd: ", local_cmd)
- display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname)
+ display.vvv(f"RUN {local_cmd}", host=self._remote_vmname)
p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -112,42 +111,42 @@ class Connection(ConnectionBase):
"""Run specified command in a running QubesVM """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- display.vvvv("CMD IS: %s" % cmd)
+ display.vvvv(f"CMD IS: {cmd}")
rc, stdout, stderr = self._qubes(cmd)
- display.vvvvv("STDOUT %r STDERR %r" % (stderr, stderr))
+ display.vvvvv(f"STDOUT {stdout!r} STDERR {stderr!r}")
return rc, stdout, stderr
def put_file(self, in_path, out_path):
""" Place a local file located in 'in_path' inside VM at 'out_path' """
super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname)
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self._remote_vmname)
with open(in_path, "rb") as fobj:
source_data = fobj.read()
- retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell")
+ retcode, dummy, dummy = self._qubes(f'cat > "{out_path}"\n', source_data, "qubes.VMRootShell")
# if qubes.VMRootShell service not supported, fallback to qubes.VMShell and
# hope it will have appropriate permissions
if retcode == 127:
- retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data)
+ retcode, dummy, dummy = self._qubes(f'cat > "{out_path}"\n', source_data)
if retcode != 0:
- raise AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path))
+ raise AnsibleConnectionFailure(f'Failed to put_file to {out_path}')
def fetch_file(self, in_path, out_path):
"""Obtain file specified via 'in_path' from the container and place it at 'out_path' """
super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname)
+ display.vvv(f"FETCH {in_path} TO {out_path}", host=self._remote_vmname)
# We are running in dom0
- cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)]
+ cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, f"cat {in_path}"]
with open(out_path, "wb") as fobj:
p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj)
p.communicate()
if p.returncode != 0:
- raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path))
+ raise AnsibleConnectionFailure(f'Failed to fetch file to {out_path}')
def close(self):
""" Closing the connection """
diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py
index 1dbc7296c7..b09ffcd787 100644
--- a/plugins/connection/saltstack.py
+++ b/plugins/connection/saltstack.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Based on local.py (c) 2012, Michael DeHaan
# Based on chroot.py (c) 2013, Maykel Moya
# Based on func.py
@@ -7,16 +6,15 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Michael Scherer (@mscherer)
- name: saltstack
- short_description: Allow ansible to piggyback on salt minions
- description:
- - This allows you to use existing Saltstack infrastructure to connect to targets.
-'''
+DOCUMENTATION = r"""
+author: Michael Scherer (@mscherer)
+name: saltstack
+short_description: Allow ansible to piggyback on salt minions
+description:
+ - This allows you to use existing Saltstack infrastructure to connect to targets.
+"""
import os
import base64
@@ -59,11 +57,11 @@ class Connection(ConnectionBase):
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
- self._display.vvv("EXEC %s" % cmd, host=self.host)
+ self._display.vvv(f"EXEC {cmd}", host=self.host)
# need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
- res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
+ res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', f"true;{cmd}"])
if self.host not in res:
- raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
+ raise errors.AnsibleError(f"Minion {self.host} didn't answer, check if salt-minion is running and the name is correct")
p = res[self.host]
return p['retcode'], p['stdout'], p['stderr']
@@ -81,7 +79,7 @@ class Connection(ConnectionBase):
super(Connection, self).put_file(in_path, out_path)
out_path = self._normalize_path(out_path, '/')
- self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.host)
with open(in_path, 'rb') as in_fh:
content = in_fh.read()
self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path])
@@ -93,7 +91,7 @@ class Connection(ConnectionBase):
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._normalize_path(in_path, '/')
- self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+ self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host)
content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
open(out_path, 'wb').write(content)
diff --git a/plugins/connection/wsl.py b/plugins/connection/wsl.py
new file mode 100644
index 0000000000..3b768eebf8
--- /dev/null
+++ b/plugins/connection/wsl.py
@@ -0,0 +1,790 @@
+# Derived from ansible/plugins/connection/proxmox_pct_remote.py (c) 2024 Nils Stein (@mietzen)
+# Derived from ansible/plugins/connection/paramiko_ssh.py (c) 2012, Michael DeHaan
+# Copyright (c) 2025 Rui Lopes (@rgl)
+# Copyright (c) 2025 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+author: Rui Lopes (@rgl)
+name: wsl
+short_description: Run tasks in WSL distribution using wsl.exe CLI via SSH
+requirements:
+ - paramiko
+description:
+ - Run commands or put/fetch files to an existing WSL distribution using wsl.exe CLI via SSH.
+ - Uses the Python SSH implementation (Paramiko) to connect to the WSL host.
+version_added: "10.6.0"
+options:
+ remote_addr:
+ description:
+ - Address of the remote target.
+ default: inventory_hostname
+ type: string
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_ssh_host
+ - name: ansible_paramiko_host
+ port:
+ description: Remote port to connect to.
+ type: int
+ default: 22
+ ini:
+ - section: defaults
+ key: remote_port
+ - section: paramiko_connection
+ key: remote_port
+ env:
+ - name: ANSIBLE_REMOTE_PORT
+ - name: ANSIBLE_REMOTE_PARAMIKO_PORT
+ vars:
+ - name: ansible_port
+ - name: ansible_ssh_port
+ - name: ansible_paramiko_port
+ keyword:
+ - name: port
+ remote_user:
+ description:
+ - User to login/authenticate as.
+ - Can be set from the CLI with the C(--user) or C(-u) options.
+ type: string
+ vars:
+ - name: ansible_user
+ - name: ansible_ssh_user
+ - name: ansible_paramiko_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ - name: ANSIBLE_PARAMIKO_REMOTE_USER
+ ini:
+ - section: defaults
+ key: remote_user
+ - section: paramiko_connection
+ key: remote_user
+ keyword:
+ - name: remote_user
+ password:
+ description:
+ - Secret used to either login the SSH server or as a passphrase for SSH keys that require it.
+ - Can be set from the CLI with the C(--ask-pass) option.
+ type: string
+ vars:
+ - name: ansible_password
+ - name: ansible_ssh_pass
+ - name: ansible_ssh_password
+ - name: ansible_paramiko_pass
+ - name: ansible_paramiko_password
+ use_rsa_sha2_algorithms:
+ description:
+ - Whether or not to enable RSA SHA2 algorithms for pubkeys and hostkeys.
+ - On paramiko versions older than 2.9, this only affects hostkeys.
+ - For behavior matching paramiko<2.9 set this to V(false).
+ vars:
+ - name: ansible_paramiko_use_rsa_sha2_algorithms
+ ini:
+ - {key: use_rsa_sha2_algorithms, section: paramiko_connection}
+ env:
+ - {name: ANSIBLE_PARAMIKO_USE_RSA_SHA2_ALGORITHMS}
+ default: true
+ type: boolean
+ host_key_auto_add:
+ description: "Automatically add host keys to C(~/.ssh/known_hosts)."
+ env:
+ - name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD
+ ini:
+ - key: host_key_auto_add
+ section: paramiko_connection
+ type: boolean
+ look_for_keys:
+ default: true
+ description: "Set to V(false) to disable searching for private key files in C(~/.ssh/)."
+ env:
+ - name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS
+ ini:
+ - {key: look_for_keys, section: paramiko_connection}
+ type: boolean
+ proxy_command:
+ default: ""
+ description:
+ - Proxy information for running the connection through a jumphost.
+ - This option is supported by paramiko version 1.9.0 or newer.
+ type: string
+ env:
+ - name: ANSIBLE_PARAMIKO_PROXY_COMMAND
+ ini:
+ - {key: proxy_command, section: paramiko_connection}
+ vars:
+ - name: ansible_paramiko_proxy_command
+ record_host_keys:
+ default: true
+ description: "Save the host keys to a file."
+ env:
+ - name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS
+ ini:
+ - section: paramiko_connection
+ key: record_host_keys
+ type: boolean
+ host_key_checking:
+ description: "Set this to V(false) if you want to avoid host key checking by the underlying tools Ansible uses to connect
+ to the host."
+ type: boolean
+ default: true
+ env:
+ - name: ANSIBLE_HOST_KEY_CHECKING
+ - name: ANSIBLE_SSH_HOST_KEY_CHECKING
+ - name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING
+ ini:
+ - section: defaults
+ key: host_key_checking
+ - section: paramiko_connection
+ key: host_key_checking
+ vars:
+ - name: ansible_host_key_checking
+ - name: ansible_ssh_host_key_checking
+ - name: ansible_paramiko_host_key_checking
+ use_persistent_connections:
+ description: "Toggles the use of persistence for connections."
+ type: boolean
+ default: false
+ env:
+ - name: ANSIBLE_USE_PERSISTENT_CONNECTIONS
+ ini:
+ - section: defaults
+ key: use_persistent_connections
+ banner_timeout:
+ type: float
+ default: 30
+ description:
+ - Configures, in seconds, the amount of time to wait for the SSH banner to be presented.
+ - This option is supported by paramiko version 1.15.0 or newer.
+ ini:
+ - section: paramiko_connection
+ key: banner_timeout
+ env:
+ - name: ANSIBLE_PARAMIKO_BANNER_TIMEOUT
+ timeout:
+ type: int
+ default: 10
+ description:
+ - Number of seconds until the plugin gives up on failing to establish a TCP connection.
+ - This option is supported by paramiko version 2.2.0 or newer.
+ ini:
+ - section: defaults
+ key: timeout
+ - section: ssh_connection
+ key: timeout
+ - section: paramiko_connection
+ key: timeout
+ env:
+ - name: ANSIBLE_TIMEOUT
+ - name: ANSIBLE_SSH_TIMEOUT
+ - name: ANSIBLE_PARAMIKO_TIMEOUT
+ vars:
+ - name: ansible_ssh_timeout
+ - name: ansible_paramiko_timeout
+ cli:
+ - name: timeout
+ lock_file_timeout:
+ type: int
+ default: 60
+ description: Number of seconds until the plugin gives up on trying to write a lock file when writing SSH known host keys.
+ vars:
+ - name: ansible_lock_file_timeout
+ env:
+ - name: ANSIBLE_LOCK_FILE_TIMEOUT
+ private_key_file:
+ description:
+ - Path to private key file to use for authentication.
+ type: path
+ ini:
+ - section: defaults
+ key: private_key_file
+ - section: paramiko_connection
+ key: private_key_file
+ env:
+ - name: ANSIBLE_PRIVATE_KEY_FILE
+ - name: ANSIBLE_PARAMIKO_PRIVATE_KEY_FILE
+ vars:
+ - name: ansible_private_key_file
+ - name: ansible_ssh_private_key_file
+ - name: ansible_paramiko_private_key_file
+ cli:
+ - name: private_key_file
+ option: "--private-key"
+ user_known_hosts_file:
+ description:
+ - Path to the user known hosts file.
+ - Used to verify the ssh hosts keys.
+ type: path
+ default: ~/.ssh/known_hosts
+ ini:
+ - section: paramiko_connection
+ key: user_known_hosts_file
+ vars:
+ - name: ansible_paramiko_user_known_hosts_file
+ wsl_distribution:
+ description:
+ - WSL distribution name.
+ type: string
+ required: true
+ vars:
+ - name: wsl_distribution
+ wsl_user:
+ description:
+ - WSL distribution user.
+ type: string
+ vars:
+ - name: wsl_user
+ become_user:
+ description:
+ - WSL distribution user.
+ type: string
+ default: root
+ vars:
+ - name: become_user
+ - name: ansible_become_user
+ become:
+ description:
+ - Whether to use the user defined by O(become_user).
+ type: bool
+ default: false
+ vars:
+ - name: become
+ - name: ansible_become
+"""
+
+EXAMPLES = r"""
+# ------------------------
+# Inventory: inventory.yml
+# ------------------------
+---
+all:
+ children:
+ wsl:
+ hosts:
+ example-wsl-ubuntu:
+ ansible_host: 10.0.0.10
+ wsl_distribution: ubuntu
+ wsl_user: ubuntu
+ vars:
+ ansible_connection: community.general.wsl
+ ansible_user: vagrant
+# ----------------------
+# Playbook: playbook.yml
+# ----------------------
+---
+- name: WSL Example
+ hosts: wsl
+ gather_facts: true
+ become: true
+ tasks:
+ - name: Ping
+ ansible.builtin.ping:
+ - name: Id (with become false)
+ become: false
+ changed_when: false
+ args:
+ executable: /bin/bash
+ ansible.builtin.shell: |
+ exec 2>&1
+ set -x
+ echo "$0"
+ pwd
+ id
+ - name: Id (with become true)
+ changed_when: false
+ args:
+ executable: /bin/bash
+ ansible.builtin.shell: |
+ exec 2>&1
+ set -x
+ echo "$0"
+ pwd
+ id
+ - name: Reboot
+ ansible.builtin.reboot:
+ boot_time_command: systemctl show -p ActiveEnterTimestamp init.scope
+"""
+
+import io
+import os
+import pathlib
+import shlex
+import socket
+import tempfile
+import traceback
+import typing as t
+
+from ansible.errors import (
+ AnsibleAuthenticationFailure,
+ AnsibleConnectionFailure,
+ AnsibleError,
+)
+from ansible_collections.community.general.plugins.module_utils._filelock import FileLock, LockTimeout
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+from ansible.utils.path import makedirs_safe
+from binascii import hexlify
+from subprocess import list2cmdline
+
+try:
+ import paramiko
+ PARAMIKO_IMPORT_ERR = None
+except ImportError:
+ paramiko = None
+ PARAMIKO_IMPORT_ERR = traceback.format_exc()
+
+
+if t.TYPE_CHECKING and PARAMIKO_IMPORT_ERR is None:
+ from paramiko import MissingHostKeyPolicy
+ from paramiko.client import SSHClient
+ from paramiko.pkey import PKey
+else:
+ MissingHostKeyPolicy: type = object
+ SSHClient: type = object
+ PKey: type = object
+
+
+display = Display()
+
+
+def authenticity_msg(hostname: str, ktype: str, fingerprint: str) -> str:
+ msg = f"""
+ paramiko: The authenticity of host '{hostname}' can't be established.
+ The {ktype} key fingerprint is {fingerprint}.
+ Are you sure you want to continue connecting (yes/no)?
+ """
+ return msg
+
+
+class MyAddPolicy(MissingHostKeyPolicy):
+ """
+ Based on AutoAddPolicy in paramiko so we can determine when keys are added
+
+ and also prompt for input.
+
+ Policy for automatically adding the hostname and new host key to the
+ local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
+ """
+
+ def __init__(self, connection: Connection) -> None:
+ self.connection = connection
+ self._options = connection._options
+
+ def missing_host_key(self, client: SSHClient, hostname: str, key: PKey) -> None:
+
+ if all((self.connection.get_option('host_key_checking'), not self.connection.get_option('host_key_auto_add'))):
+
+ fingerprint = hexlify(key.get_fingerprint())
+ ktype = key.get_name()
+
+ if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence:
+ # don't print the prompt string since the user cannot respond
+ # to the question anyway
+ raise AnsibleError(authenticity_msg(hostname, ktype, fingerprint)[1:92])
+
+ inp = to_text(
+ display.prompt_until(authenticity_msg(hostname, ktype, fingerprint), private=False),
+ errors='surrogate_or_strict'
+ )
+
+ if inp.lower() not in ['yes', 'y', '']:
+ raise AnsibleError('host connection rejected by user')
+
+ key._added_by_ansible_this_time = True
+
+ # existing implementation below:
+ client._host_keys.add(hostname, key.get_name(), key)
+
+ # host keys are actually saved in close() function below
+ # in order to control ordering.
+
+
+class Connection(ConnectionBase):
+ """ SSH based connections (paramiko) to WSL """
+
+ transport = 'community.general.wsl'
+ _log_channel: str | None = None
+
+ def __init__(self, play_context: PlayContext, new_stdin: io.TextIOWrapper | None = None, *args: t.Any, **kwargs: t.Any):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ def _set_log_channel(self, name: str) -> None:
+ """ Mimic paramiko.SSHClient.set_log_channel """
+ self._log_channel = name
+
+ def _parse_proxy_command(self, port: int = 22) -> dict[str, t.Any]:
+ proxy_command = self.get_option('proxy_command') or None
+
+ sock_kwarg = {}
+ if proxy_command:
+ replacers: t.Dict[str, str] = {
+ '%h': self.get_option('remote_addr'),
+ '%p': str(port),
+ '%r': self.get_option('remote_user')
+ }
+ for find, replace in replacers.items():
+ proxy_command = proxy_command.replace(find, replace)
+ try:
+ sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)}
+ display.vvv(f'CONFIGURE PROXY COMMAND FOR CONNECTION: {proxy_command}', host=self.get_option('remote_addr'))
+ except AttributeError:
+ display.warning('Paramiko ProxyCommand support unavailable. '
+ 'Please upgrade to Paramiko 1.9.0 or newer. '
+ 'Not using configured ProxyCommand')
+
+ return sock_kwarg
+
+ def _connect(self) -> Connection:
+ """ activates the connection object """
+
+ if PARAMIKO_IMPORT_ERR is not None:
+ raise AnsibleError(f'paramiko is not installed: {to_native(PARAMIKO_IMPORT_ERR)}')
+
+ port = self.get_option('port')
+ display.vvv(f'ESTABLISH PARAMIKO SSH CONNECTION FOR USER: {self.get_option("remote_user")} on PORT {to_text(port)} TO {self.get_option("remote_addr")}',
+ host=self.get_option('remote_addr'))
+
+ ssh = paramiko.SSHClient()
+
+ # Set pubkey and hostkey algorithms to disable, the only manipulation allowed currently
+ # is keeping or omitting rsa-sha2 algorithms
+ # default_keys: t.Tuple[str] = ()
+ paramiko_preferred_pubkeys = getattr(paramiko.Transport, '_preferred_pubkeys', ())
+ paramiko_preferred_hostkeys = getattr(paramiko.Transport, '_preferred_keys', ())
+ use_rsa_sha2_algorithms = self.get_option('use_rsa_sha2_algorithms')
+ disabled_algorithms: t.Dict[str, t.Iterable[str]] = {}
+ if not use_rsa_sha2_algorithms:
+ if paramiko_preferred_pubkeys:
+ disabled_algorithms['pubkeys'] = tuple(a for a in paramiko_preferred_pubkeys if 'rsa-sha2' in a)
+ if paramiko_preferred_hostkeys:
+ disabled_algorithms['keys'] = tuple(a for a in paramiko_preferred_hostkeys if 'rsa-sha2' in a)
+
+ # override paramiko's default logger name
+ if self._log_channel is not None:
+ ssh.set_log_channel(self._log_channel)
+
+ self.keyfile = os.path.expanduser(self.get_option('user_known_hosts_file'))
+
+ if self.get_option('host_key_checking'):
+ for ssh_known_hosts in ('/etc/ssh/ssh_known_hosts', '/etc/openssh/ssh_known_hosts', self.keyfile):
+ try:
+ ssh.load_system_host_keys(ssh_known_hosts)
+ break
+ except IOError:
+ pass # file was not found, but not required to function
+ except paramiko.hostkeys.InvalidHostKey as e:
+ raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}')
+ try:
+ ssh.load_system_host_keys()
+ except paramiko.hostkeys.InvalidHostKey as e:
+ raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}')
+
+ ssh_connect_kwargs = self._parse_proxy_command(port)
+ ssh.set_missing_host_key_policy(MyAddPolicy(self))
+ conn_password = self.get_option('password')
+ allow_agent = True
+
+ if conn_password is not None:
+ allow_agent = False
+
+ try:
+ key_filename = None
+ if self.get_option('private_key_file'):
+ key_filename = os.path.expanduser(self.get_option('private_key_file'))
+
+ # paramiko 2.2 introduced auth_timeout parameter
+ if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'):
+ ssh_connect_kwargs['auth_timeout'] = self.get_option('timeout')
+
+ # paramiko 1.15 introduced banner timeout parameter
+ if LooseVersion(paramiko.__version__) >= LooseVersion('1.15.0'):
+ ssh_connect_kwargs['banner_timeout'] = self.get_option('banner_timeout')
+
+ ssh.connect(
+ self.get_option('remote_addr').lower(),
+ username=self.get_option('remote_user'),
+ allow_agent=allow_agent,
+ look_for_keys=self.get_option('look_for_keys'),
+ key_filename=key_filename,
+ password=conn_password,
+ timeout=self.get_option('timeout'),
+ port=port,
+ disabled_algorithms=disabled_algorithms,
+ **ssh_connect_kwargs,
+ )
+ except paramiko.ssh_exception.BadHostKeyException as e:
+ raise AnsibleConnectionFailure(f'host key mismatch for {to_text(e.hostname)}')
+ except paramiko.ssh_exception.AuthenticationException as e:
+ msg = f'Failed to authenticate: {e}'
+ raise AnsibleAuthenticationFailure(msg)
+ except Exception as e:
+ msg = to_text(e)
+ if 'PID check failed' in msg:
+ raise AnsibleError('paramiko version issue, please upgrade paramiko on the machine running ansible')
+ elif 'Private key file is encrypted' in msg:
+ msg = (
+ f'ssh {self.get_option("remote_user")}@{self.get_options("remote_addr")}:{port} : '
+ f'{msg}\nTo connect as a different user, use -u .'
+ )
+ raise AnsibleConnectionFailure(msg)
+ else:
+ raise AnsibleConnectionFailure(msg)
+ self.ssh = ssh
+ self._connected = True
+ return self
+
+ def _any_keys_added(self) -> bool:
+ for hostname, keys in self.ssh._host_keys.items():
+ for keytype, key in keys.items():
+ added_this_time = getattr(key, '_added_by_ansible_this_time', False)
+ if added_this_time:
+ return True
+ return False
+
def _save_ssh_host_keys(self, filename: str) -> None:
    """
    not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
    don't complain about it :)
    """

    # nothing to persist if this run learned no new keys
    if not self._any_keys_added():
        return

    # make sure ~/.ssh exists before attempting to write anything
    makedirs_safe(os.path.expanduser('~/.ssh'))

    def _entries(want_new: bool):
        # Yield formatted known_hosts lines, selecting either keys that were
        # already known (want_new=False) or keys learned this run (want_new=True).
        for host, keys_by_type in self.ssh._host_keys.items():
            for key_type, host_key in keys_by_type.items():
                if bool(getattr(host_key, '_added_by_ansible_this_time', False)) == want_new:
                    yield f'{host} {key_type} {host_key.get_base64()}\n'

    with open(filename, 'w') as known_hosts:
        # pre-existing entries first, newly learned keys appended at the bottom
        known_hosts.writelines(_entries(False))
        known_hosts.writelines(_entries(True))
+
+ def _build_wsl_command(self, cmd: str) -> str:
+ wsl_distribution = self.get_option('wsl_distribution')
+ become = self.get_option('become')
+ become_user = self.get_option('become_user')
+ if become and become_user:
+ wsl_user = become_user
+ else:
+ wsl_user = self.get_option('wsl_user')
+ args = ['wsl.exe', '--distribution', wsl_distribution]
+ if wsl_user:
+ args.extend(['--user', wsl_user])
+ args.extend(['--'])
+ args.extend(shlex.split(cmd))
+ if os.getenv('_ANSIBLE_TEST_WSL_CONNECTION_PLUGIN_Waeri5tepheeSha2fae8'):
+ return shlex.join(args)
+ return list2cmdline(args) # see https://github.com/python/cpython/blob/3.11/Lib/subprocess.py#L576
+
def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
    """Run a command inside the configured WSL distribution.

    :param cmd: shell command to run; it is wrapped in a wsl.exe invocation
    :param in_data: bytes to stream to the remote command's stdin, if any
    :param sudoable: whether the command may be wrapped by a become method
    :returns: tuple of (return code, stdout bytes, stderr bytes)
    :raises AnsibleConnectionFailure: if an SSH session cannot be opened
    :raises AnsibleError: on privilege-escalation failure or missing wsl.exe
    """

    cmd = self._build_wsl_command(cmd)

    super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

    bufsize = 4096

    try:
        self.ssh.get_transport().set_keepalive(5)
        chan = self.ssh.get_transport().open_session()
    except Exception as e:
        text_e = to_text(e)
        msg = 'Failed to open session'
        if text_e:
            msg += f': {text_e}'
        raise AnsibleConnectionFailure(to_native(msg))

    display.vvv(f'EXEC {cmd}', host=self.get_option('remote_addr'))

    cmd = to_bytes(cmd, errors='surrogate_or_strict')

    no_prompt_out = b''
    no_prompt_err = b''
    become_output = b''

    try:
        chan.exec_command(cmd)
        if self.become and self.become.expect_prompt():
            password_prompt = False
            become_success = False
            while not (become_success or password_prompt):
                display.debug('Waiting for Privilege Escalation input')

                chunk = chan.recv(bufsize)
                display.debug(f'chunk is: {to_text(chunk)}')
                if not chunk:
                    if b'unknown user' in become_output:
                        n_become_user = to_native(self.become.get_option('become_user'))
                        raise AnsibleError(f'user {n_become_user} does not exist')
                    else:
                        # channel closed without a prompt; treat accumulated
                        # output as regular command output below
                        break
                become_output += chunk

                # need to check every line because we might get lectured
                # and we might get the middle of a line in a chunk
                for line in become_output.splitlines(True):
                    if self.become.check_success(line):
                        become_success = True
                        break
                    elif self.become.check_password_prompt(line):
                        password_prompt = True
                        break

            if password_prompt:
                if self.become:
                    become_pass = self.become.get_option('become_pass')
                    chan.sendall(to_bytes(f"{become_pass}\n", errors='surrogate_or_strict'))
                else:
                    raise AnsibleError('A password is required but none was supplied')
            else:
                no_prompt_out += become_output
                no_prompt_err += become_output

        if in_data:
            # stream stdin in bufsize chunks, then signal EOF
            for i in range(0, len(in_data), bufsize):
                chan.send(in_data[i:i + bufsize])
            chan.shutdown_write()
        elif in_data == b'':
            chan.shutdown_write()

    except socket.timeout:
        raise AnsibleError(f'ssh timed out waiting for privilege escalation.\n{to_text(become_output)}')

    stdout = b''.join(chan.makefile('rb', bufsize))
    stderr = b''.join(chan.makefile_stderr('rb', bufsize))
    returncode = chan.recv_exit_status()

    # NB the full english error message is:
    # 'wsl.exe' is not recognized as an internal or external command,
    # operable program or batch file.
    # decode defensively: remote stderr is not guaranteed to be valid UTF-8
    if "'wsl.exe' is not recognized" in stderr.decode('utf-8', errors='replace'):
        raise AnsibleError(
            f'wsl.exe not found in path of host: {to_text(self.get_option("remote_addr"))}')

    # fix: stderr is prefixed with no_prompt_err (was no_prompt_out; both hold
    # identical bytes today, but the err buffer is the one intended here)
    return (returncode, no_prompt_out + stdout, no_prompt_err + stderr)
+
def put_file(self, in_path: str, out_path: str) -> None:
    """ transfer a file from local to remote """

    display.vvv(f'PUT {in_path} TO {out_path}', host=self.get_option('remote_addr'))
    try:
        # read the whole local file, then stream it to a remote `cat > out_path`
        with open(in_path, 'rb') as source:
            payload = source.read()
        remote_cmd = f"{self._shell.executable} -c {self._shell.quote(f'cat > {out_path}')}"
        returncode, stdout, stderr = self.exec_command(remote_cmd, in_data=payload, sudoable=False)
        if returncode != 0:
            if 'cat: not found' in stderr.decode('utf-8'):
                raise AnsibleError(
                    f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}')
            raise AnsibleError(f'{to_text(stdout)}\n{to_text(stderr)}')
    except Exception as e:
        # note: AnsibleErrors raised above are also caught and re-wrapped here,
        # matching the established behavior of this plugin
        raise AnsibleError(f'error occurred while putting file from {in_path} to {out_path}!\n{to_text(e)}')
+
def fetch_file(self, in_path: str, out_path: str) -> None:
    """ save a remote file to the specified path """

    display.vvv(f'FETCH {in_path} TO {out_path}', host=self.get_option('remote_addr'))
    try:
        # run a remote `cat in_path` and capture its stdout as the file contents
        remote_cmd = f"{self._shell.executable} -c {self._shell.quote(f'cat {in_path}')}"
        returncode, stdout, stderr = self.exec_command(remote_cmd, sudoable=False)
        if returncode != 0:
            if 'cat: not found' in stderr.decode('utf-8'):
                raise AnsibleError(
                    f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}')
            raise AnsibleError(f'{to_text(stdout)}\n{to_text(stderr)}')
        with open(out_path, 'wb') as destination:
            destination.write(stdout)
    except Exception as e:
        # note: AnsibleErrors raised above are also caught and re-wrapped here,
        # matching the established behavior of this plugin
        raise AnsibleError(f'error occurred while fetching file from {in_path} to {out_path}!\n{to_text(e)}')
+
def reset(self) -> None:
    """ reset the connection """

    if self._connected:
        # tear down the current SSH session and establish a fresh one
        self.close()
        self._connect()
+
def close(self) -> None:
    """ terminate the connection """

    # Persist newly learned host keys before shutting down, but only when the
    # user has opted in via host_key_checking + record_host_keys.
    if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added():
        # add any new SSH host keys -- warning -- this could be slow
        # (This doesn't acquire the connection lock because it needs
        # to exclude only other known_hosts writers, not connections
        # that are starting up.)
        lockfile = os.path.basename(self.keyfile)
        dirname = os.path.dirname(self.keyfile)
        makedirs_safe(dirname)
        tmp_keyfile_name = None
        try:
            # serialize known_hosts writers via a file lock with a configurable timeout
            with FileLock().lock_file(lockfile, dirname, self.get_option('lock_file_timeout')):
                # just in case any were added recently

                self.ssh.load_system_host_keys()
                self.ssh._host_keys.update(self.ssh._system_host_keys)

                # gather information about the current key file, so
                # we can ensure the new file has the correct mode/owner

                key_dir = os.path.dirname(self.keyfile)
                if os.path.exists(self.keyfile):
                    # preserve existing permissions and ownership
                    key_stat = os.stat(self.keyfile)
                    mode = key_stat.st_mode & 0o777
                    uid = key_stat.st_uid
                    gid = key_stat.st_gid
                else:
                    # no existing file: default to 0644 owned by the current user
                    mode = 0o644
                    uid = os.getuid()
                    gid = os.getgid()

                # Save the new keys to a temporary file and move it into place
                # rather than rewriting the file. We set delete=False because
                # the file will be moved into place rather than cleaned up.

                with tempfile.NamedTemporaryFile(dir=key_dir, delete=False) as tmp_keyfile:
                    tmp_keyfile_name = tmp_keyfile.name
                    os.chmod(tmp_keyfile_name, mode)
                    os.chown(tmp_keyfile_name, uid, gid)
                    self._save_ssh_host_keys(tmp_keyfile_name)

                # atomic replace: readers never see a partially written keyfile
                os.rename(tmp_keyfile_name, self.keyfile)
        except LockTimeout:
            raise AnsibleError(
                f'writing lock file for {self.keyfile} ran in to the timeout of {self.get_option("lock_file_timeout")}s')
        except paramiko.hostkeys.InvalidHostKey as e:
            raise AnsibleConnectionFailure(f'Invalid host key: {e.line}')
        except Exception as e:
            # unable to save keys, including scenario when key was invalid
            # and caught earlier
            raise AnsibleError(
                f'error occurred while writing SSH host keys!\n{to_text(e)}')
        finally:
            # clean up the temp file if the rename never happened; after a
            # successful rename the path no longer exists (missing_ok covers it)
            if tmp_keyfile_name is not None:
                pathlib.Path(tmp_keyfile_name).unlink(missing_ok=True)

    self.ssh.close()
    self._connected = False
diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py
index 34827c7e37..49b3188f44 100644
--- a/plugins/connection/zone.py
+++ b/plugins/connection/zone.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Based on local.py (c) 2012, Michael DeHaan
# and chroot.py (c) 2013, Maykel Moya
# and jail.py (c) 2013, Michael Scherer
@@ -8,32 +7,32 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Ansible Core Team
- name: zone
- short_description: Run tasks in a zone instance
+DOCUMENTATION = r"""
+author: Ansible Core Team
+name: zone
+short_description: Run tasks in a zone instance
+description:
+ - Run commands or put/fetch files to an existing zone.
+options:
+ remote_addr:
description:
- - Run commands or put/fetch files to an existing zone
- options:
- remote_addr:
- description:
- - Zone identifier
- default: inventory_hostname
- vars:
- - name: ansible_host
- - name: ansible_zone_host
-'''
+ - Zone identifier.
+ type: string
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_zone_host
+"""
import os
import os.path
import subprocess
import traceback
+from shlex import quote as shlex_quote
from ansible.errors import AnsibleError
-from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_bytes
from ansible.plugins.connection import ConnectionBase, BUFSIZE
@@ -61,14 +60,14 @@ class Connection(ConnectionBase):
self.zlogin_cmd = to_bytes(self._search_executable('zlogin'))
if self.zone not in self.list_zones():
- raise AnsibleError("incorrect zone name %s" % self.zone)
+ raise AnsibleError(f"incorrect zone name {self.zone}")
@staticmethod
def _search_executable(executable):
try:
return get_bin_path(executable)
except ValueError:
- raise AnsibleError("%s command not found in PATH" % executable)
+ raise AnsibleError(f"{executable} command not found in PATH")
def list_zones(self):
process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
@@ -93,7 +92,7 @@ class Connection(ConnectionBase):
# stdout, stderr = p.communicate()
path = process.stdout.readlines()[0].split(':')[3]
- return path + '/root'
+ return f"{path}/root"
def _connect(self):
""" connect to the zone; nothing to do here """
@@ -116,7 +115,7 @@ class Connection(ConnectionBase):
local_cmd = [self.zlogin_cmd, self.zone, cmd]
local_cmd = map(to_bytes, local_cmd)
- display.vvv("EXEC %s" % (local_cmd), host=self.zone)
+ display.vvv(f"EXEC {local_cmd}", host=self.zone)
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -139,7 +138,7 @@ class Connection(ConnectionBase):
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
- Can revisit using $HOME instead if it's a problem
+ Can revisit using $HOME instead if it is a problem
"""
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
@@ -148,7 +147,7 @@ class Connection(ConnectionBase):
def put_file(self, in_path, out_path):
""" transfer a file from local to zone """
super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
+ display.vvv(f"PUT {in_path} TO {out_path}", host=self.zone)
out_path = shlex_quote(self._prefix_login_path(out_path))
try:
@@ -158,27 +157,27 @@ class Connection(ConnectionBase):
else:
count = ''
try:
- p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file)
except OSError:
raise AnsibleError("jail connection requires dd command in the jail")
try:
stdout, stderr = p.communicate()
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}")
except IOError:
- raise AnsibleError("file or module does not exist at: %s" % in_path)
+ raise AnsibleError(f"file or module does not exist at: {in_path}")
def fetch_file(self, in_path, out_path):
""" fetch a file from zone to local """
super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
+ display.vvv(f"FETCH {in_path} TO {out_path}", host=self.zone)
in_path = shlex_quote(self._prefix_login_path(in_path))
try:
- p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}')
except OSError:
raise AnsibleError("zone connection requires dd command in the zone")
@@ -190,10 +189,10 @@ class Connection(ConnectionBase):
chunk = p.stdout.read(BUFSIZE)
except Exception:
traceback.print_exc()
- raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}")
stdout, stderr = p.communicate()
if p.returncode != 0:
- raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}")
def close(self):
""" terminate the connection; nothing to do here """
diff --git a/plugins/doc_fragments/alicloud.py b/plugins/doc_fragments/alicloud.py
index f464e178c7..f0083c9946 100644
--- a/plugins/doc_fragments/alicloud.py
+++ b/plugins/doc_fragments/alicloud.py
@@ -1,109 +1,97 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Alicloud only documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
alicloud_access_key:
description:
- - Alibaba Cloud access key. If not set then the value of environment variable C(ALICLOUD_ACCESS_KEY),
- C(ALICLOUD_ACCESS_KEY_ID) will be used instead.
+ - Alibaba Cloud access key. If not set then the value of environment variable E(ALICLOUD_ACCESS_KEY), E(ALICLOUD_ACCESS_KEY_ID)
+ is used instead.
aliases: ['access_key_id', 'access_key']
type: str
alicloud_secret_key:
description:
- - Alibaba Cloud secret key. If not set then the value of environment variable C(ALICLOUD_SECRET_KEY),
- C(ALICLOUD_SECRET_ACCESS_KEY) will be used instead.
+ - Alibaba Cloud secret key. If not set then the value of environment variable E(ALICLOUD_SECRET_KEY), E(ALICLOUD_SECRET_ACCESS_KEY)
+ is used instead.
aliases: ['secret_access_key', 'secret_key']
type: str
alicloud_region:
description:
- - The Alibaba Cloud region to use. If not specified then the value of environment variable
- C(ALICLOUD_REGION), C(ALICLOUD_REGION_ID) will be used instead.
+ - The Alibaba Cloud region to use. If not specified then the value of environment variable E(ALICLOUD_REGION), E(ALICLOUD_REGION_ID)
+ is used instead.
aliases: ['region', 'region_id']
required: true
type: str
alicloud_security_token:
description:
- - The Alibaba Cloud security token. If not specified then the value of environment variable
- C(ALICLOUD_SECURITY_TOKEN) will be used instead.
+ - The Alibaba Cloud security token. If not specified then the value of environment variable E(ALICLOUD_SECURITY_TOKEN)
+ is used instead.
aliases: ['security_token']
type: str
alicloud_assume_role:
description:
- - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials.
- - The nested assume_role block supports I(alicloud_assume_role_arn), I(alicloud_assume_role_session_name),
- I(alicloud_assume_role_session_expiration) and I(alicloud_assume_role_policy)
+ - If provided with a role ARN, Ansible attempts to assume this role using the supplied credentials.
+ - The nested assume_role block supports C(alicloud_assume_role_arn), C(alicloud_assume_role_session_name), C(alicloud_assume_role_session_expiration)
+ and C(alicloud_assume_role_policy).
type: dict
aliases: ['assume_role']
alicloud_assume_role_arn:
description:
- - The Alibaba Cloud role_arn. The ARN of the role to assume. If ARN is set to an empty string,
- it does not perform role switching. It supports environment variable ALICLOUD_ASSUME_ROLE_ARN.
- ansible will execute with provided credentials.
+ - The Alibaba Cloud C(role_arn). The ARN of the role to assume. If ARN is set to an empty string, it does not perform
+ role switching. It supports environment variable E(ALICLOUD_ASSUME_ROLE_ARN). Ansible executes with provided credentials.
aliases: ['assume_role_arn']
type: str
alicloud_assume_role_session_name:
description:
- - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted,
- 'ansible' is passed to the AssumeRole call as session name. It supports environment variable
- ALICLOUD_ASSUME_ROLE_SESSION_NAME
+ - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted, 'ansible' is passed to
+ the AssumeRole call as session name. It supports environment variable E(ALICLOUD_ASSUME_ROLE_SESSION_NAME).
aliases: ['assume_role_session_name']
type: str
alicloud_assume_role_session_expiration:
description:
- - The Alibaba Cloud session_expiration. The time after which the established session for assuming
- role expires. Valid value range 900-3600 seconds. Default to 3600 (in this case Alicloud use own default
- value). It supports environment variable ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION
+ - The Alibaba Cloud C(session_expiration). The time after which the established session for assuming role expires. Valid
+ value range 900-3600 seconds. Defaults to 3600 (in this case Alicloud uses its own default value). It supports environment
+ variable E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION).
aliases: ['assume_role_session_expiration']
type: int
ecs_role_name:
description:
- - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control'
- section of the Alibaba Cloud console.
- - If you're running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible will just access the
- metadata U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS
- credential. This is a preferred approach over any other when running in ECS as you can avoid hard coding
- credentials. Instead these are leased on-the-fly by Ansible which reduces the chance of leakage.
+ - The RAM Role Name attached on an ECS instance for API operations. You can retrieve this from the 'Access Control' section
+ of the Alibaba Cloud console.
+ - If you are running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible just accesses the metadata
+ U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS credential.
+ This is a preferred approach over any other when running in ECS as you can avoid hard coding credentials. Instead
+ these are leased on-the-fly by Ansible which reduces the chance of leakage.
aliases: ['role_name']
type: str
profile:
description:
- - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the
- ALICLOUD_PROFILE environment variable.
+ - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the E(ALICLOUD_PROFILE)
+ environment variable.
type: str
shared_credentials_file:
description:
- - This is the path to the shared credentials file. It can also be sourced from the ALICLOUD_SHARED_CREDENTIALS_FILE
+ - This is the path to the shared credentials file. It can also be sourced from the E(ALICLOUD_SHARED_CREDENTIALS_FILE)
environment variable.
- - If this is not set and a profile is specified, ~/.aliyun/config.json will be used.
+ - If this is not set and a profile is specified, C(~/.aliyun/config.json) is used.
type: str
author:
- - "He Guimin (@xiaozhu36)"
+ - "He Guimin (@xiaozhu36)"
requirements:
- - "python >= 3.6"
+ - "Python >= 3.6"
notes:
- - If parameters are not set within the module, the following
- environment variables can be used in decreasing order of precedence
- C(ALICLOUD_ACCESS_KEY) or C(ALICLOUD_ACCESS_KEY_ID),
- C(ALICLOUD_SECRET_KEY) or C(ALICLOUD_SECRET_ACCESS_KEY),
- C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID),
- C(ALICLOUD_SECURITY_TOKEN),
- C(ALICLOUD_ECS_ROLE_NAME),
- C(ALICLOUD_SHARED_CREDENTIALS_FILE),
- C(ALICLOUD_PROFILE),
- C(ALICLOUD_ASSUME_ROLE_ARN),
- C(ALICLOUD_ASSUME_ROLE_SESSION_NAME),
- C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION),
- - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) can be typically be used to specify the
- ALICLOUD region, when required, but this can also be configured in the footmark config file
-'''
+ - If parameters are not set within the module, the following environment variables can be used in decreasing order of precedence
+ E(ALICLOUD_ACCESS_KEY) or E(ALICLOUD_ACCESS_KEY_ID), E(ALICLOUD_SECRET_KEY) or E(ALICLOUD_SECRET_ACCESS_KEY), E(ALICLOUD_REGION)
+ or E(ALICLOUD_REGION_ID), E(ALICLOUD_SECURITY_TOKEN), E(ALICLOUD_ECS_ROLE_NAME), E(ALICLOUD_SHARED_CREDENTIALS_FILE),
+ E(ALICLOUD_PROFILE), E(ALICLOUD_ASSUME_ROLE_ARN), E(ALICLOUD_ASSUME_ROLE_SESSION_NAME), E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION).
+ - E(ALICLOUD_REGION) or E(ALICLOUD_REGION_ID) can typically be used to specify the Alicloud region, when required, but
+ this can also be configured in the footmark config file.
+"""
diff --git a/plugins/doc_fragments/attributes.py b/plugins/doc_fragments/attributes.py
new file mode 100644
index 0000000000..fdafe1aeaa
--- /dev/null
+++ b/plugins/doc_fragments/attributes.py
@@ -0,0 +1,91 @@
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r"""
+options: {}
+attributes:
+ check_mode:
+ description: Can run in C(check_mode) and return changed status prediction without modifying target.
+ diff_mode:
+ description: Returns details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
+"""
+
+ PLATFORM = r"""
+options: {}
+attributes:
+ platform:
+ description: Target OS/families that can be operated against.
+ support: N/A
+"""
+
+ # Should be used together with the standard fragment
+ INFO_MODULE = r'''
+options: {}
+attributes:
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+'''
+
+ CONN = r"""
+options: {}
+attributes:
+ become:
+ description: Is usable alongside C(become) keywords.
+ connection:
+ description: Uses the target's configured connection information to execute code on it.
+ delegation:
+ description: Can be used in conjunction with C(delegate_to) and related keywords.
+"""
+
+ FACTS = r"""
+options: {}
+attributes:
+ facts:
+ description: Action returns an C(ansible_facts) dictionary that updates existing host facts.
+"""
+
+ # Should be used together with the standard fragment and the FACTS fragment
+ FACTS_MODULE = r'''
+options: {}
+attributes:
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+ facts:
+ support: full
+'''
+
+ FILES = r"""
+options: {}
+attributes:
+ safe_file_operations:
+ description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
+"""
+
+ FLOW = r"""
+options: {}
+attributes:
+ action:
+ description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+ async:
+ description: Supports being used with the C(async) keyword.
+"""
diff --git a/plugins/doc_fragments/auth_basic.py b/plugins/doc_fragments/auth_basic.py
index e05311af03..3d99466165 100644
--- a/plugins/doc_fragments/auth_basic.py
+++ b/plugins/doc_fragments/auth_basic.py
@@ -1,32 +1,30 @@
-# -*- coding: utf-8 -*-
# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard files documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
api_url:
description:
- - The resolvable endpoint for the API
+ - The resolvable endpoint for the API.
type: str
api_username:
description:
- - The username to use for authentication against the API
+ - The username to use for authentication against the API.
type: str
api_password:
description:
- - The password to use for authentication against the API
+ - The password to use for authentication against the API.
type: str
validate_certs:
description:
- - Whether or not to validate SSL certs when supplying a https endpoint.
+ - Whether or not to validate SSL certs when supplying an HTTPS endpoint.
type: bool
- default: yes
-'''
+ default: true
+"""
diff --git a/plugins/doc_fragments/bitbucket.py b/plugins/doc_fragments/bitbucket.py
index 9ab6fe318d..c96a010e71 100644
--- a/plugins/doc_fragments/bitbucket.py
+++ b/plugins/doc_fragments/bitbucket.py
@@ -1,42 +1,42 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2019, Evgeniy Krysanov
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
client_id:
description:
- The OAuth consumer key.
- - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ - If not set the environment variable E(BITBUCKET_CLIENT_ID) is used.
type: str
client_secret:
description:
- The OAuth consumer secret.
- - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ - If not set the environment variable E(BITBUCKET_CLIENT_SECRET) is used.
type: str
user:
description:
- The username.
- - If not set the environment variable C(BITBUCKET_USERNAME) will be used.
+ - If not set the environment variable E(BITBUCKET_USERNAME) is used.
+ - O(ignore:username) is an alias of O(user) since community.general 6.0.0. It was an alias of O(workspace) before.
type: str
version_added: 4.0.0
+ aliases: [username]
password:
description:
- The App password.
- - If not set the environment variable C(BITBUCKET_PASSWORD) will be used.
+ - If not set the environment variable E(BITBUCKET_PASSWORD) is used.
type: str
version_added: 4.0.0
notes:
- Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
- Bitbucket App password can be created from Bitbucket profile -> Personal Settings -> App passwords.
- If both OAuth and Basic Auth credentials are passed, OAuth credentials take precedence.
-'''
+"""
diff --git a/plugins/doc_fragments/consul.py b/plugins/doc_fragments/consul.py
new file mode 100644
index 0000000000..fd9c1a6e6c
--- /dev/null
+++ b/plugins/doc_fragments/consul.py
@@ -0,0 +1,55 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+class ModuleDocFragment:
+ # Common parameters for Consul modules
+ DOCUMENTATION = r"""
+options:
+ host:
+ description:
+ - Host of the Consul agent.
+ default: localhost
+ type: str
+ port:
+ type: int
+ description:
+ - The port on which the Consul agent is running.
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the Consul agent is running. Defaults to V(http) and can be set to V(https) for secure
+ connections.
+ default: http
+ type: str
+ validate_certs:
+ type: bool
+ description:
+ - Whether to verify the TLS certificate of the Consul agent.
+ default: true
+ ca_path:
+ description:
+ - The CA bundle to use for https connections.
+ type: str
+"""
+
+ TOKEN = r"""
+options:
+ token:
+ description:
+ - The token to use for authorization.
+ type: str
+"""
+
+ ACTIONGROUP_CONSUL = r"""
+options: {}
+attributes:
+ action_group:
+ description: Use C(group/community.general.consul) in C(module_defaults) to set defaults for this module.
+ support: full
+ membership:
+ - community.general.consul
+"""
diff --git a/plugins/doc_fragments/dimensiondata.py b/plugins/doc_fragments/dimensiondata.py
index f754f9cc76..1804c3c7ba 100644
--- a/plugins/doc_fragments/dimensiondata.py
+++ b/plugins/doc_fragments/dimensiondata.py
@@ -1,11 +1,9 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Dimension Data
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
# Authors:
# - Adam Friedman
@@ -14,28 +12,27 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Dimension Data doc fragment
- DOCUMENTATION = r'''
-
+ DOCUMENTATION = r"""
options:
region:
description:
- The target region.
- - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py]
- - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html)
- - Note that the default value "na" stands for "North America".
- - The module prepends 'dd-' to the region choice.
+ - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py].
+ - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html).
+ - Note that the default value C(na) stands for "North America".
+ - The module prepends C(dd-) to the region choice.
type: str
default: na
mcp_user:
description:
- The username used to authenticate to the CloudControl API.
- - If not specified, will fall back to C(MCP_USER) from environment variable or C(~/.dimensiondata).
+ - If not specified, falls back to E(MCP_USER) from environment variable or C(~/.dimensiondata).
type: str
mcp_password:
description:
- The password used to authenticate to the CloudControl API.
- - If not specified, will fall back to C(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
- - Required if I(mcp_user) is specified.
+ - If not specified, falls back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
+ - Required if O(mcp_user) is specified.
type: str
location:
description:
@@ -44,8 +41,8 @@ options:
required: true
validate_certs:
description:
- - If C(false), SSL certificates will not be validated.
+ - If V(false), SSL certificates are not validated.
- This should only be used on private instances of the CloudControl API that use self-signed certificates.
type: bool
- default: yes
-'''
+ default: true
+"""
diff --git a/plugins/doc_fragments/dimensiondata_wait.py b/plugins/doc_fragments/dimensiondata_wait.py
index 509f5c56fb..40b3a1d6e8 100644
--- a/plugins/doc_fragments/dimensiondata_wait.py
+++ b/plugins/doc_fragments/dimensiondata_wait.py
@@ -1,11 +1,9 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Dimension Data
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
# Authors:
# - Adam Friedman
@@ -14,24 +12,23 @@ __metaclass__ = type
class ModuleDocFragment(object):
# Dimension Data ("wait-for-completion" parameters) doc fragment
- DOCUMENTATION = r'''
-
+ DOCUMENTATION = r"""
options:
wait:
description:
- Should we wait for the task to complete before moving onto the next.
type: bool
- default: no
+ default: false
wait_time:
description:
- The maximum amount of time (in seconds) to wait for the task to complete.
- - Only applicable if I(wait=true).
+ - Only applicable if O(wait=true).
type: int
default: 600
wait_poll_interval:
description:
- The amount of time (in seconds) to wait between checks for task completion.
- - Only applicable if I(wait=true).
+ - Only applicable if O(wait=true).
type: int
default: 2
- '''
+"""
diff --git a/plugins/doc_fragments/django.py b/plugins/doc_fragments/django.py
new file mode 100644
index 0000000000..f62e2224d8
--- /dev/null
+++ b/plugins/doc_fragments/django.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2024, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = r"""
+options:
+ venv:
+ description:
+ - Use the Python interpreter from this virtual environment.
+ - Pass the path to the root of the virtualenv, not the C(bin/) directory nor the C(python) executable.
+ type: path
+ settings:
+ description:
+ - Specifies the settings module to use.
+ - The value is passed as is to the C(--settings) argument in C(django-admin).
+ type: str
+ required: true
+ pythonpath:
+ description:
+ - Adds the given filesystem path to the Python import search path.
+ - The value is passed as is to the C(--pythonpath) argument in C(django-admin).
+ type: path
+ traceback:
+ description:
+ - Provides a full stack trace in the output when a C(CommandError) is raised.
+ type: bool
+ verbosity:
+ description:
+ - Specifies the amount of notification and debug information in the output of C(django-admin).
+ type: int
+ choices: [0, 1, 2, 3]
+ skip_checks:
+ description:
+ - Skips running system checks prior to running the command.
+ type: bool
+
+
+notes:
+ - The C(django-admin) command is always executed using the C(C) locale, and the option C(--no-color) is always passed.
+seealso:
+ - name: django-admin and manage.py in official Django documentation
+ description: >-
+ Refer to this documentation for the builtin commands and options of C(django-admin). Please make sure that you select
+ the right version of Django in the version selector on that page.
+ link: https://docs.djangoproject.com/en/5.0/ref/django-admin/
+"""
+
+ DATABASE = r"""
+options:
+ database:
+ description:
+ - Specify the database to be used.
+ type: str
+ default: default
+"""
+
+ DATA = r"""
+options:
+ excludes:
+ description:
+ - Applications or models to be excluded.
+ - Format must be either V(app_label) or V(app_label.ModelName).
+ type: list
+ elements: str
+ format:
+ description:
+ - Serialization format of the output data.
+ type: str
+ default: json
+ choices: [xml, json, jsonl, yaml]
+notes:
+ - As it is now, the module is B(not idempotent). Ensuring idempotency for this case can be a bit tricky, because it would
+ amount to ensuring beforehand that all the data in the fixture file is already in the database, which is not a trivial feat.
+ Unfortunately, neither C(django loaddata) nor C(django dumpdata) have a C(--dry-run) option, so the only way to know whether
+ there is a change or not is to actually load or dump the data.
+"""
diff --git a/plugins/doc_fragments/emc.py b/plugins/doc_fragments/emc.py
index e9e57a2c10..9268b7fc42 100644
--- a/plugins/doc_fragments/emc.py
+++ b/plugins/doc_fragments/emc.py
@@ -1,46 +1,34 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Luca Lorenzetto (@remix_tj)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
-options:
- - See respective platform section for more details
-requirements:
- - See respective platform section for more details
-notes:
- - Ansible modules are available for EMC VNX.
-'''
-
# Documentation fragment for VNX (emc_vnx)
EMC_VNX = r'''
options:
- sp_address:
- description:
- - Address of the SP of target/secondary storage.
- type: str
- required: true
- sp_user:
- description:
- - Username for accessing SP.
- type: str
- default: sysadmin
- sp_password:
- description:
- - password for accessing SP.
- type: str
- default: sysadmin
+ sp_address:
+ description:
+ - Address of the SP of target/secondary storage.
+ type: str
+ required: true
+ sp_user:
+ description:
+ - Username for accessing SP.
+ type: str
+ default: sysadmin
+ sp_password:
+ description:
+ - Password for accessing SP.
+ type: str
+ default: sysadmin
requirements:
- An EMC VNX Storage device.
- - Ansible 2.7.
- - storops (0.5.10 or greater). Install using 'pip install storops'.
+ - storops (0.5.10 or greater). Install using C(pip install storops).
notes:
- - The modules prefixed with emc_vnx are built to support the EMC VNX storage platform.
+ - The modules prefixed with C(emc_vnx) are built to support the EMC VNX storage platform.
'''
diff --git a/plugins/doc_fragments/gitlab.py b/plugins/doc_fragments/gitlab.py
index 705a93c023..af7a527a81 100644
--- a/plugins/doc_fragments/gitlab.py
+++ b/plugins/doc_fragments/gitlab.py
@@ -1,16 +1,14 @@
-# -*- coding: utf-8 -*-
# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard files documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
requirements:
- requests (Python library U(https://pypi.org/project/requests/))
@@ -29,4 +27,9 @@ options:
- GitLab CI job token for logging in.
type: str
version_added: 4.2.0
-'''
+ ca_path:
+ description:
+ - The CA certificates bundle to use to verify GitLab server certificate.
+ type: str
+ version_added: 8.1.0
+"""
diff --git a/plugins/doc_fragments/hpe3par.py b/plugins/doc_fragments/hpe3par.py
index 96e53846e1..e126c63c56 100644
--- a/plugins/doc_fragments/hpe3par.py
+++ b/plugins/doc_fragments/hpe3par.py
@@ -1,36 +1,33 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# HPE 3PAR doc fragment
- DOCUMENTATION = '''
+ DOCUMENTATION = r"""
options:
- storage_system_ip:
- description:
- - The storage system IP address.
- type: str
- required: true
- storage_system_password:
- description:
- - The storage system password.
- type: str
- required: true
- storage_system_username:
- description:
- - The storage system user name.
- type: str
- required: true
+ storage_system_ip:
+ description:
+ - The storage system IP address.
+ type: str
+ required: true
+ storage_system_password:
+ description:
+ - The storage system password.
+ type: str
+ required: true
+ storage_system_username:
+ description:
+ - The storage system user name.
+ type: str
+ required: true
requirements:
- - hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk'
+ - hpe3par_sdk >= 1.0.2. Install using C(pip install hpe3par_sdk).
- WSAPI service should be enabled on the 3PAR storage array.
notes:
- - check_mode not supported
- '''
+"""
diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py
index d3cebb6dbc..99362243ec 100644
--- a/plugins/doc_fragments/hwc.py
+++ b/plugins/doc_fragments/hwc.py
@@ -1,66 +1,57 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Huawei Inc.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# HWC doc fragment.
- DOCUMENTATION = '''
+ DOCUMENTATION = r"""
options:
- identity_endpoint:
- description:
- - The Identity authentication URL.
- type: str
- required: true
- user:
- description:
- - The user name to login with (currently only user names are
- supported, and not user IDs).
- type: str
- required: true
- password:
- description:
- - The password to login with.
- type: str
- required: true
- domain:
- description:
- - The name of the Domain to scope to (Identity v3).
- (currently only domain names are supported, and not domain IDs).
- type: str
- required: true
- project:
- description:
- - The name of the Tenant (Identity v2) or Project (Identity v3).
- (currently only project names are supported, and not
- project IDs).
- type: str
- required: true
- region:
- description:
- - The region to which the project belongs.
- type: str
- id:
- description:
- - The id of resource to be managed.
- type: str
+ identity_endpoint:
+ description:
+ - The Identity authentication URL.
+ type: str
+ required: true
+ user:
+ description:
+ - The user name to login with.
+ - Currently only user names are supported, and not user IDs.
+ type: str
+ required: true
+ password:
+ description:
+ - The password to login with.
+ type: str
+ required: true
+ domain:
+ description:
+ - The name of the Domain to scope to (Identity v3).
+ - Currently only domain names are supported, and not domain IDs.
+ type: str
+ required: true
+ project:
+ description:
+ - The name of the Tenant (Identity v2) or Project (Identity v3).
+ - Currently only project names are supported, and not project IDs.
+ type: str
+ required: true
+ region:
+ description:
+ - The region to which the project belongs.
+ type: str
+ id:
+ description:
+ - The ID of resource to be managed.
+ type: str
notes:
- - For authentication, you can set identity_endpoint using the
- C(ANSIBLE_HWC_IDENTITY_ENDPOINT) env variable.
- - For authentication, you can set user using the
- C(ANSIBLE_HWC_USER) env variable.
- - For authentication, you can set password using the C(ANSIBLE_HWC_PASSWORD) env
- variable.
- - For authentication, you can set domain using the C(ANSIBLE_HWC_DOMAIN) env
- variable.
- - For authentication, you can set project using the C(ANSIBLE_HWC_PROJECT) env
- variable.
- - For authentication, you can set region using the C(ANSIBLE_HWC_REGION) env variable.
- - Environment variables values will only be used if the playbook values are
- not set.
-'''
+ - For authentication, you can set identity_endpoint using the E(ANSIBLE_HWC_IDENTITY_ENDPOINT) environment variable.
+ - For authentication, you can set user using the E(ANSIBLE_HWC_USER) environment variable.
+ - For authentication, you can set password using the E(ANSIBLE_HWC_PASSWORD) environment variable.
+ - For authentication, you can set domain using the E(ANSIBLE_HWC_DOMAIN) environment variable.
+ - For authentication, you can set project using the E(ANSIBLE_HWC_PROJECT) environment variable.
+ - For authentication, you can set region using the E(ANSIBLE_HWC_REGION) environment variable.
+ - Environment variables values are only used when the playbook values are not set.
+"""
diff --git a/plugins/doc_fragments/ibm_storage.py b/plugins/doc_fragments/ibm_storage.py
index ec3b0a0e05..ab61cd51c1 100644
--- a/plugins/doc_fragments/ibm_storage.py
+++ b/plugins/doc_fragments/ibm_storage.py
@@ -1,38 +1,34 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, IBM CORPORATION
# Author(s): Tzur Eliyahu
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# ibm_storage documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
- username:
- description:
- - Management user on the spectrum accelerate storage system.
- type: str
- required: True
- password:
- description:
- - Password for username on the spectrum accelerate storage system.
- type: str
- required: True
- endpoints:
- description:
- - The hostname or management IP of Spectrum Accelerate storage system.
- type: str
- required: True
+ username:
+ description:
+ - Management user on the Spectrum Accelerate storage system.
+ type: str
+ required: true
+ password:
+ description:
+ - Password for username on the Spectrum Accelerate storage system.
+ type: str
+ required: true
+ endpoints:
+ description:
+ - The hostname or management IP of Spectrum Accelerate storage system.
+ type: str
+ required: true
notes:
- - This module requires pyxcli python library.
- Use 'pip install pyxcli' in order to get pyxcli.
+ - This module requires pyxcli python library. Use C(pip install pyxcli) in order to get pyxcli.
requirements:
- - python >= 2.7
- pyxcli
-'''
+"""
diff --git a/plugins/doc_fragments/influxdb.py b/plugins/doc_fragments/influxdb.py
index 133041a628..7f0688b868 100644
--- a/plugins/doc_fragments/influxdb.py
+++ b/plugins/doc_fragments/influxdb.py
@@ -1,83 +1,80 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017, Ansible Project
# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Parameters for influxdb modules
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
hostname:
description:
- - The hostname or IP address on which InfluxDB server is listening.
- - Since Ansible 2.5, defaulted to localhost.
+ - The hostname or IP address on which InfluxDB server is listening.
type: str
default: localhost
username:
description:
- - Username that will be used to authenticate against InfluxDB server.
- - Alias C(login_username) added in Ansible 2.5.
+ - Username that is used to authenticate against InfluxDB server.
type: str
default: root
- aliases: [ login_username ]
+ aliases: [login_username]
password:
description:
- - Password that will be used to authenticate against InfluxDB server.
- - Alias C(login_password) added in Ansible 2.5.
+ - Password that is used to authenticate against InfluxDB server.
type: str
default: root
- aliases: [ login_password ]
+ aliases: [login_password]
port:
description:
- - The port on which InfluxDB server is listening
+ - The port on which InfluxDB server is listening.
type: int
default: 8086
path:
description:
- - The path on which InfluxDB server is accessible
- - Only available when using python-influxdb >= 5.1.0
+ - The path on which InfluxDB server is accessible.
+ - Only available when using python-influxdb >= 5.1.0.
type: str
+ default: ''
version_added: '0.2.0'
validate_certs:
description:
- - If set to C(no), the SSL certificates will not be validated.
- - This should only set to C(no) used on personally controlled sites using self-signed certificates.
+ - If set to V(false), the SSL certificates are not validated.
+ - This should only be set to V(false) on personally controlled sites using self-signed certificates.
type: bool
- default: yes
+ default: true
ssl:
description:
- - Use https instead of http to connect to InfluxDB server.
+ - Use https instead of http to connect to InfluxDB server.
type: bool
default: false
timeout:
description:
- - Number of seconds Requests will wait for client to establish a connection.
+ - Number of seconds Requests waits for client to establish a connection.
type: int
retries:
description:
- - Number of retries client will try before aborting.
- - C(0) indicates try until success.
- - Only available when using python-influxdb >= 4.1.0
+ - Number of retries client performs before aborting.
+ - V(0) indicates try until success.
+ - Only available when using C(python-influxdb) >= 4.1.0.
type: int
default: 3
use_udp:
description:
- - Use UDP to connect to InfluxDB server.
+ - Use UDP to connect to InfluxDB server.
type: bool
default: false
udp_port:
description:
- - UDP port to connect to InfluxDB server.
+ - UDP port to connect to InfluxDB server.
type: int
default: 4444
proxies:
description:
- - HTTP(S) proxy to use for Requests to connect to InfluxDB server.
+ - HTTP(S) proxy to use for Requests to connect to InfluxDB server.
type: dict
-'''
+ default: {}
+"""
diff --git a/plugins/doc_fragments/ipa.py b/plugins/doc_fragments/ipa.py
index d799bac184..0b740ae8ed 100644
--- a/plugins/doc_fragments/ipa.py
+++ b/plugins/doc_fragments/ipa.py
@@ -1,76 +1,83 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017-18, Ansible Project
# Copyright (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Parameters for FreeIPA/IPA modules
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
ipa_port:
description:
- - Port of FreeIPA / IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_PORT) will be used instead.
- - If both the environment variable C(IPA_PORT) and the value are not specified in the task, then default value is set.
- - Environment variable fallback mechanism is added in Ansible 2.5.
+ - Port of FreeIPA / IPA server.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PORT) is used instead.
+ - If both the environment variable E(IPA_PORT) and the value are not specified in the task, then default value is set.
type: int
default: 443
ipa_host:
description:
- - IP or hostname of IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead.
- - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server.
- - The relevant entry needed in FreeIPA is the 'ipa-ca' entry.
- - If neither the DNS entry, nor the environment C(IPA_HOST), nor the value are available in the task, then the default value will be used.
- - Environment variable fallback mechanism is added in Ansible 2.5.
+ - IP or hostname of IPA server.
+ - If the value is not specified in the task, the value of environment variable E(IPA_HOST) is used instead.
+ - If both the environment variable E(IPA_HOST) and the value are not specified in the task, then DNS is used to try
+ to discover the FreeIPA server.
+ - The relevant entry needed in FreeIPA is the C(ipa-ca) entry.
+ - If neither the DNS entry, nor the environment E(IPA_HOST), nor the value are available in the task, then the default
+ value is used.
type: str
default: ipa.example.com
ipa_user:
description:
- - Administrative account used on IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_USER) will be used instead.
- - If both the environment variable C(IPA_USER) and the value are not specified in the task, then default value is set.
- - Environment variable fallback mechanism is added in Ansible 2.5.
+ - Administrative account used on IPA server.
+ - If the value is not specified in the task, the value of environment variable E(IPA_USER) is used instead.
+ - If both the environment variable E(IPA_USER) and the value are not specified in the task, then default value is set.
type: str
default: admin
ipa_pass:
description:
- - Password of administrative user.
- - If the value is not specified in the task, the value of environment variable C(IPA_PASS) will be used instead.
- - Note that if the 'urllib_gssapi' library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
- - If the environment variable C(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate to the FreeIPA server.
- - If the environment variable C(KRB5_CLIENT_KTNAME) is available, and C(KRB5CCNAME) is not; the module will use this kerberos keytab to authenticate.
- - If GSSAPI is not available, the usage of 'ipa_pass' is required.
- - Environment variable fallback mechanism is added in Ansible 2.5.
+ - Password of administrative user.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PASS) is used instead.
+ - Note that if the C(urllib_gssapi) library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
+ - If the environment variable E(KRB5CCNAME) is available, the module uses this Kerberos credentials cache to authenticate
+ to the FreeIPA server.
+ - If the environment variable E(KRB5_CLIENT_KTNAME) is available, and E(KRB5CCNAME) is not; the module uses this Kerberos
+ keytab to authenticate.
+ - If GSSAPI is not available, the usage of O(ipa_pass) is required.
type: str
ipa_prot:
description:
- - Protocol used by IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_PROT) will be used instead.
- - If both the environment variable C(IPA_PROT) and the value are not specified in the task, then default value is set.
- - Environment variable fallback mechanism is added in Ansible 2.5.
+ - Protocol used by IPA server.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PROT) is used instead.
+ - If both the environment variable E(IPA_PROT) and the value are not specified in the task, then default value is set.
type: str
- choices: [ http, https ]
+ choices: [http, https]
default: https
validate_certs:
description:
- - This only applies if C(ipa_prot) is I(https).
- - If set to C(no), the SSL certificates will not be validated.
- - This should only set to C(no) used on personally controlled sites using self-signed certificates.
+ - This only applies if O(ipa_prot) is V(https).
+ - If set to V(false), the SSL certificates are not validated.
+ - This should only be set to V(false) on personally controlled sites using self-signed certificates.
type: bool
- default: yes
+ default: true
ipa_timeout:
description:
- - Specifies idle timeout (in seconds) for the connection.
- - For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_TIMEOUT) will be used instead.
- - If both the environment variable C(IPA_TIMEOUT) and the value are not specified in the task, then default value is set.
+ - Specifies idle timeout (in seconds) for the connection.
+ - For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
+ - If the value is not specified in the task, the value of environment variable E(IPA_TIMEOUT) is used instead.
+ - If both the environment variable E(IPA_TIMEOUT) and the value are not specified in the task, then default value is
+ set.
type: int
default: 10
-'''
+"""
+
+ CONNECTION_NOTES = r"""
+options: {}
+notes:
+ - This module uses JSON-RPC over HTTP(S) to communicate with the FreeIPA server.
+ If you need to enroll the managed node into FreeIPA realm, you might want to consider using the collection
+ L(freeipa.ansible_freeipa, https://galaxy.ansible.com/ui/repo/published/freeipa/ansible_freeipa/), but shell access to one
+ node from the realm is required to manage the deployment.
+"""
diff --git a/plugins/doc_fragments/keycloak.py b/plugins/doc_fragments/keycloak.py
index 3ef0aeb9e3..2ec693eb99 100644
--- a/plugins/doc_fragments/keycloak.py
+++ b/plugins/doc_fragments/keycloak.py
@@ -1,78 +1,93 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017, Eike Frost
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
- auth_keycloak_url:
- description:
- - URL to the Keycloak instance.
- type: str
- required: true
- aliases:
- - url
+ auth_keycloak_url:
+ description:
+ - URL to the Keycloak instance.
+ type: str
+ required: true
+ aliases:
+ - url
- auth_client_id:
- description:
- - OpenID Connect I(client_id) to authenticate to the API with.
- type: str
- default: admin-cli
+ auth_client_id:
+ description:
+ - OpenID Connect C(client_id) to authenticate to the API with.
+ type: str
+ default: admin-cli
- auth_realm:
- description:
- - Keycloak realm name to authenticate to for API access.
- type: str
+ auth_realm:
+ description:
+ - Keycloak realm name to authenticate to for API access.
+ type: str
- auth_client_secret:
- description:
- - Client Secret to use in conjunction with I(auth_client_id) (if required).
- type: str
+ auth_client_secret:
+ description:
+ - Client Secret to use in conjunction with O(auth_client_id) (if required).
+ type: str
- auth_username:
- description:
- - Username to authenticate for API access with.
- type: str
- aliases:
- - username
+ auth_username:
+ description:
+ - Username to authenticate for API access with.
+ type: str
+ aliases:
+ - username
- auth_password:
- description:
- - Password to authenticate for API access with.
- type: str
- aliases:
- - password
+ auth_password:
+ description:
+ - Password to authenticate for API access with.
+ type: str
+ aliases:
+ - password
- token:
- description:
- - Authentication token for Keycloak API.
- type: str
- version_added: 3.0.0
+ token:
+ description:
+ - Authentication token for Keycloak API.
+ type: str
+ version_added: 3.0.0
- validate_certs:
- description:
- - Verify TLS certificates (do not disable this in production).
- type: bool
- default: yes
+ refresh_token:
+ description:
+ - Authentication refresh token for Keycloak API.
+ type: str
+ version_added: 10.3.0
- connection_timeout:
- description:
- - Controls the HTTP connections timeout period (in seconds) to Keycloak API.
- type: int
- default: 10
- version_added: 4.5.0
- http_agent:
- description:
- - Configures the HTTP User-Agent header.
- type: str
- default: Ansible
- version_added: 5.4.0
-'''
+ validate_certs:
+ description:
+ - Verify TLS certificates (do not disable this in production).
+ type: bool
+ default: true
+
+ connection_timeout:
+ description:
+ - Controls the HTTP connections timeout period (in seconds) to Keycloak API.
+ type: int
+ default: 10
+ version_added: 4.5.0
+
+ http_agent:
+ description:
+ - Configures the HTTP User-Agent header.
+ type: str
+ default: Ansible
+ version_added: 5.4.0
+"""
+
+ ACTIONGROUP_KEYCLOAK = r"""
+options: {}
+attributes:
+ action_group:
+ description: Use C(group/community.general.keycloak) in C(module_defaults) to set defaults for this module.
+ support: full
+ membership:
+ - community.general.keycloak
+"""
diff --git a/plugins/doc_fragments/ldap.py b/plugins/doc_fragments/ldap.py
index d5c8107d35..d787bfd65d 100644
--- a/plugins/doc_fragments/ldap.py
+++ b/plugins/doc_fragments/ldap.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2016, Peter Sagerson
# Copyright (c) 2016, Jiri Tyr
@@ -6,23 +5,45 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard LDAP documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
+notes:
+ - The default authentication settings attempts to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with
+ the default Ubuntu install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to
+ modify the server configuration. If you need to use a simple bind to access your server, pass the credentials in O(bind_dn)
+ and O(bind_pw).
options:
bind_dn:
description:
- - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.
- - If this is blank, we'll use an anonymous bind.
+ - A DN to bind with. Try to use a SASL bind with the EXTERNAL mechanism as default when this parameter is omitted.
+ - Use an anonymous bind if the parameter is blank.
type: str
bind_pw:
description:
- - The password to use with I(bind_dn).
+ - The password to use with O(bind_dn).
type: str
+ default: ''
+ ca_path:
+ description:
+ - Set the path to PEM file with CA certs.
+ type: path
+ version_added: "6.5.0"
+ client_cert:
+ type: path
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - Required if O(client_key) is defined.
+ version_added: "7.1.0"
+ client_key:
+ type: path
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - Required if O(client_cert) is defined.
+ version_added: "7.1.0"
dn:
required: true
description:
@@ -34,12 +55,13 @@ options:
type: str
description:
- Set the referrals chasing behavior.
- - C(anonymous) follow referrals anonymously. This is the default behavior.
- - C(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off.
+ - V(anonymous) follow referrals anonymously. This is the default behavior.
+ - V(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off.
version_added: 2.0.0
server_uri:
description:
- - The I(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host, and the port fields.
+ - The O(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host,
+ and the port fields.
- The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.
- Note that when using multiple URIs you cannot determine to which URI your client gets connected.
- For URIs containing additional fields, particularly when using commas, behavior is undefined.
@@ -47,21 +69,30 @@ options:
default: ldapi:///
start_tls:
description:
- - If true, we'll use the START_TLS LDAP extension.
+ - Use the START_TLS LDAP extension if set to V(true).
type: bool
- default: no
+ default: false
validate_certs:
description:
- - If set to C(no), SSL certificates will not be validated.
+ - If set to V(false), SSL certificates are not validated.
- This should only be used on sites using self-signed certificates.
type: bool
- default: yes
+ default: true
sasl_class:
description:
- The class to use for SASL authentication.
- - possible choices are C(external), C(gssapi).
type: str
choices: ['external', 'gssapi']
default: external
version_added: "2.0.0"
-'''
+ xorder_discovery:
+ description:
+ - Set the behavior on how to process Xordered DNs.
+ - V(enable) performs a C(ONELEVEL) search below the superior RDN to find the matching DN.
+ - V(disable) always uses the DN unmodified (as passed by the O(dn) parameter).
+ - V(auto) only performs a search if the first RDN does not contain an index number (C({x})).
+ type: str
+ choices: ['enable', 'auto', 'disable']
+ default: auto
+ version_added: "6.4.0"
+"""
diff --git a/plugins/doc_fragments/lxca_common.py b/plugins/doc_fragments/lxca_common.py
index b5e7d72948..72bc3b7054 100644
--- a/plugins/doc_fragments/lxca_common.py
+++ b/plugins/doc_fragments/lxca_common.py
@@ -1,16 +1,14 @@
-# -*- coding: utf-8 -*-
# Copyright (C) 2017 Lenovo, Inc.
# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
# SPDX-License-Identifier: BSD-2-Clause
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard Pylxca documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
author:
- Naval Patel (@navalkp)
- Prashant Bhosale (@prabhosa)
@@ -18,19 +16,19 @@ author:
options:
login_user:
description:
- - The username for use in HTTP basic authentication.
+ - The username for use in HTTP basic authentication.
type: str
required: true
login_password:
description:
- - The password for use in HTTP basic authentication.
+ - The password for use in HTTP basic authentication.
type: str
required: true
auth_url:
description:
- - lxca https full web address
+ - LXCA HTTPS full web address.
type: str
required: true
@@ -38,7 +36,6 @@ requirements:
- pylxca
notes:
- - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca)
- - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca)
- - Check mode is not supported.
-'''
+ - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca).
+ - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca).
+"""
diff --git a/plugins/doc_fragments/manageiq.py b/plugins/doc_fragments/manageiq.py
index be0dd70694..e7351e4f5e 100644
--- a/plugins/doc_fragments/manageiq.py
+++ b/plugins/doc_fragments/manageiq.py
@@ -1,17 +1,15 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017, Daniel Korn
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard ManageIQ documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
manageiq_connection:
description:
@@ -21,33 +19,34 @@ options:
suboptions:
url:
description:
- - ManageIQ environment url. C(MIQ_URL) env var if set. otherwise, it is required to pass it.
+ - ManageIQ environment URL. E(MIQ_URL) environment variable if set. Otherwise, it is required to pass it.
type: str
required: false
username:
description:
- - ManageIQ username. C(MIQ_USERNAME) env var if set. otherwise, required if no token is passed in.
+ - ManageIQ username. E(MIQ_USERNAME) environment variable if set. Otherwise, required if no token is passed in.
type: str
password:
description:
- - ManageIQ password. C(MIQ_PASSWORD) env var if set. otherwise, required if no token is passed in.
+ - ManageIQ password. E(MIQ_PASSWORD) environment variable if set. Otherwise, required if no token is passed in.
type: str
token:
description:
- - ManageIQ token. C(MIQ_TOKEN) env var if set. otherwise, required if no username or password is passed in.
+ - ManageIQ token. E(MIQ_TOKEN) environment variable if set. Otherwise, required if no username or password is passed
+ in.
type: str
validate_certs:
description:
- - Whether SSL certificates should be verified for HTTPS requests. defaults to True.
+ - Whether SSL certificates should be verified for HTTPS requests.
type: bool
- default: yes
- aliases: [ verify_ssl ]
+ default: true
+ aliases: [verify_ssl]
ca_cert:
description:
- - The path to a CA bundle file or directory with certificates. defaults to None.
+ - The path to a CA bundle file or directory with certificates.
type: str
- aliases: [ ca_bundle_path ]
+ aliases: [ca_bundle_path]
requirements:
- 'manageiq-client U(https://github.com/ManageIQ/manageiq-api-client-python/)'
-'''
+"""
diff --git a/plugins/doc_fragments/nomad.py b/plugins/doc_fragments/nomad.py
index b19404e830..37485ef9a7 100644
--- a/plugins/doc_fragments/nomad.py
+++ b/plugins/doc_fragments/nomad.py
@@ -1,52 +1,56 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2020 FERREIRA Christophe
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard files documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
- host:
- description:
- - FQDN of Nomad server.
- required: true
- type: str
- use_ssl:
- description:
- - Use TLS/SSL connection.
- type: bool
- default: true
- timeout:
- description:
- - Timeout (in seconds) for the request to Nomad.
- type: int
- default: 5
- validate_certs:
- description:
- - Enable TLS/SSL certificate validation.
- type: bool
- default: true
- client_cert:
- description:
- - Path of certificate for TLS/SSL.
- type: path
- client_key:
- description:
- - Path of certificate's private key for TLS/SSL.
- type: path
- namespace:
- description:
- - Namespace for Nomad.
- type: str
- token:
- description:
- - ACL token for authentification.
- type: str
-'''
+ host:
+ description:
+ - FQDN of Nomad server.
+ required: true
+ type: str
+ port:
+ description:
+ - Port of Nomad server.
+ type: int
+ default: 4646
+ version_added: 8.0.0
+ use_ssl:
+ description:
+ - Use TLS/SSL connection.
+ type: bool
+ default: true
+ timeout:
+ description:
+ - Timeout (in seconds) for the request to Nomad.
+ type: int
+ default: 5
+ validate_certs:
+ description:
+ - Enable TLS/SSL certificate validation.
+ type: bool
+ default: true
+ client_cert:
+ description:
+ - Path of certificate for TLS/SSL.
+ type: path
+ client_key:
+ description:
+ - Path of certificate's private key for TLS/SSL.
+ type: path
+ namespace:
+ description:
+ - Namespace for Nomad.
+ type: str
+ token:
+ description:
+ - ACL token for authentication.
+ type: str
+"""
diff --git a/plugins/doc_fragments/onepassword.py b/plugins/doc_fragments/onepassword.py
new file mode 100644
index 0000000000..7a2c7566c3
--- /dev/null
+++ b/plugins/doc_fragments/onepassword.py
@@ -0,0 +1,77 @@
+
+# Copyright (c) 2023, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = r"""
+requirements:
+ - See U(https://support.1password.com/command-line/)
+options:
+ master_password:
+ description: The password used to unlock the specified vault.
+ aliases: ['vault_password']
+ type: str
+ section:
+ description: Item section containing the field to retrieve (case-insensitive). If absent, returns first match from any
+ section.
+ domain:
+ description: Domain of 1Password.
+ default: '1password.com'
+ type: str
+ subdomain:
+ description: The 1Password subdomain to authenticate against.
+ type: str
+ account_id:
+ description: The account ID to target.
+ type: str
+ username:
+ description: The username used to sign in.
+ type: str
+ secret_key:
+ description: The secret key used when performing an initial sign in.
+ type: str
+ service_account_token:
+ description:
+ - The access key for a service account.
+ - Only works with 1Password CLI version 2 or later.
+ type: str
+ vault:
+ description: Vault containing the item to retrieve (case-insensitive). If absent, searches all vaults.
+ type: str
+ connect_host:
+ description: The host for 1Password Connect. Must be used in combination with O(connect_token).
+ type: str
+ env:
+ - name: OP_CONNECT_HOST
+ version_added: 8.1.0
+ connect_token:
+ description: The token for 1Password Connect. Must be used in combination with O(connect_host).
+ type: str
+ env:
+ - name: OP_CONNECT_TOKEN
+ version_added: 8.1.0
+"""
+
+ LOOKUP = r"""
+options:
+ service_account_token:
+ env:
+ - name: OP_SERVICE_ACCOUNT_TOKEN
+ version_added: 8.2.0
+notes:
+ - This lookup uses an existing 1Password session if one exists. If not, and you have already performed an initial sign in
+ (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the O(master_password)
+ is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain is used by C(op).
+ - This lookup can perform an initial login by providing O(subdomain), O(username), O(secret_key), and O(master_password).
+ - Can target a specific account by providing the O(account_id).
+ - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal
+ credentials needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or
+ greater in strength to the 1Password master password.
+ - This lookup stores potentially sensitive data from 1Password as Ansible facts. Facts are subject to caching if enabled,
+ which means this data could be stored in clear text on disk or in a database.
+ - Tested with C(op) version 2.7.2.
+"""
diff --git a/plugins/doc_fragments/oneview.py b/plugins/doc_fragments/oneview.py
index 0ab50e637b..9e64f02e1a 100644
--- a/plugins/doc_fragments/oneview.py
+++ b/plugins/doc_fragments/oneview.py
@@ -1,80 +1,75 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# OneView doc fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
- config:
- description:
- - Path to a .json configuration file containing the OneView client configuration.
- The configuration file is optional and when used should be present in the host running the ansible commands.
- If the file path is not provided, the configuration will be loaded from environment variables.
- For links to example configuration files or how to use the environment variables verify the notes section.
- type: path
- api_version:
- description:
- - OneView API Version.
- type: int
- image_streamer_hostname:
- description:
- - IP address or hostname for the HPE Image Streamer REST API.
- type: str
- hostname:
- description:
- - IP address or hostname for the appliance.
- type: str
- username:
- description:
- - Username for API authentication.
- type: str
- password:
- description:
- - Password for API authentication.
- type: str
+ config:
+ description:
+ - Path to a JSON configuration file containing the OneView client configuration. The configuration file is optional
+ and when used should be present in the host running the Ansible commands. If the file path is not provided, the configuration
+ is loaded from environment variables. For links to example configuration files or how to use the environment variables
+ verify the notes section.
+ type: path
+ api_version:
+ description:
+ - OneView API Version.
+ type: int
+ image_streamer_hostname:
+ description:
+ - IP address or hostname for the HPE Image Streamer REST API.
+ type: str
+ hostname:
+ description:
+ - IP address or hostname for the appliance.
+ type: str
+ username:
+ description:
+ - Username for API authentication.
+ type: str
+ password:
+ description:
+ - Password for API authentication.
+ type: str
requirements:
- - python >= 2.7.9
+ - Python >= 2.7.9
notes:
- - "A sample configuration file for the config parameter can be found at:
- U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)"
- - "Check how to use environment variables for configuration at:
- U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)"
- - "Additional Playbooks for the HPE OneView Ansible modules can be found at:
- U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)"
- - "The OneView API version used will directly affect returned and expected fields in resources.
- Information on setting the desired API version and can be found at:
- U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)"
- '''
+ - 'A sample configuration file for the config parameter can be found at:
+ U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json).'
+ - 'Check how to use environment variables for configuration at: U(https://github.com/HewlettPackard/oneview-ansible#environment-variables).'
+ - 'Additional Playbooks for the HPE OneView Ansible modules can be found at: U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples).'
+ - 'The OneView API version used directly affects returned and expected fields in resources. Information on setting the desired
+ API version can be found at: U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version).'
+"""
- VALIDATEETAG = r'''
+ VALIDATEETAG = r"""
options:
- validate_etag:
- description:
- - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag
- for the resource matches the ETag provided in the data.
- type: bool
- default: yes
-'''
+ validate_etag:
+ description:
+ - When the ETag Validation is enabled, the request is conditionally processed only if the current ETag for the resource
+ matches the ETag provided in the data.
+ type: bool
+ default: true
+"""
- FACTSPARAMS = r'''
+ FACTSPARAMS = r"""
options:
- params:
- description:
- - List of params to delimit, filter and sort the list of resources.
- - "params allowed:
- - C(start): The first item to return, using 0-based indexing.
- - C(count): The number of resources to return.
- - C(filter): A general filter/query string to narrow the list of items returned.
- - C(sort): The sort order of the returned data set."
- type: dict
-'''
+ params:
+ description:
+ - List of parameters to delimit, filter and sort the list of resources.
+ - 'Parameter keys allowed are:'
+ - 'V(start): The first item to return, using 0-based indexing.'
+ - 'V(count): The number of resources to return.'
+ - 'V(filter): A general filter/query string to narrow the list of items returned.'
+ - 'V(sort): The sort order of the returned data set.'
+ type: dict
+"""
diff --git a/plugins/doc_fragments/online.py b/plugins/doc_fragments/online.py
index c0757ca6a1..c2b130e7a0 100644
--- a/plugins/doc_fragments/online.py
+++ b/plugins/doc_fragments/online.py
@@ -1,45 +1,41 @@
-# -*- coding: utf-8 -*-
# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
api_token:
description:
- Online OAuth token.
type: str
required: true
- aliases: [ oauth_token ]
+ aliases: [oauth_token]
api_url:
description:
- - Online API URL
+ - Online API URL.
type: str
default: 'https://api.online.net'
- aliases: [ base_url ]
+ aliases: [base_url]
api_timeout:
description:
- HTTP timeout to Online API in seconds.
type: int
default: 30
- aliases: [ timeout ]
+ aliases: [timeout]
validate_certs:
description:
- Validate SSL certs of the Online API.
type: bool
- default: yes
+ default: true
notes:
- - Also see the API documentation on U(https://console.online.net/en/api/)
- - If C(api_token) is not set within the module, the following
- environment variables can be used in decreasing order of precedence
- C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN)
- - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL)
- environment variable.
-'''
+ - Also see the API documentation on U(https://console.online.net/en/api/).
+ - If O(api_token) is not set within the module, the following environment variables can be used in decreasing order of precedence:
+ E(ONLINE_TOKEN), E(ONLINE_API_KEY), E(ONLINE_OAUTH_TOKEN), E(ONLINE_API_TOKEN).
+ - If one wants to use a different O(api_url) one can also set the E(ONLINE_API_URL) environment variable.
+"""
diff --git a/plugins/doc_fragments/opennebula.py b/plugins/doc_fragments/opennebula.py
index 91bfd09529..72ccf7d70d 100644
--- a/plugins/doc_fragments/opennebula.py
+++ b/plugins/doc_fragments/opennebula.py
@@ -1,45 +1,43 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, www.privaz.io Valletech AB
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# OpenNebula common documentation
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
- api_url:
- description:
- - The ENDPOINT URL of the XMLRPC server.
- - If not specified then the value of the ONE_URL environment variable, if any, is used.
- type: str
- aliases:
- - api_endpoint
- api_username:
- description:
- - The name of the user for XMLRPC authentication.
- - If not specified then the value of the ONE_USERNAME environment variable, if any, is used.
- type: str
- api_password:
- description:
- - The password or token for XMLRPC authentication.
- - If not specified then the value of the ONE_PASSWORD environment variable, if any, is used.
- type: str
- aliases:
- - api_token
- validate_certs:
- description:
- - Whether to validate the SSL certificates or not.
- - This parameter is ignored if PYTHONHTTPSVERIFY environment variable is used.
- type: bool
- default: yes
- wait_timeout:
- description:
- - Time to wait for the desired state to be reached before timeout, in seconds.
- type: int
- default: 300
-'''
+ api_url:
+ description:
+ - The ENDPOINT URL of the XMLRPC server.
+ - If not specified then the value of the E(ONE_URL) environment variable, if any, is used.
+ type: str
+ aliases:
+ - api_endpoint
+ api_username:
+ description:
+ - The name of the user for XMLRPC authentication.
+ - If not specified then the value of the E(ONE_USERNAME) environment variable, if any, is used.
+ type: str
+ api_password:
+ description:
+ - The password or token for XMLRPC authentication.
+ - If not specified then the value of the E(ONE_PASSWORD) environment variable, if any, is used.
+ type: str
+ aliases:
+ - api_token
+ validate_certs:
+ description:
+ - Whether to validate the TLS/SSL certificates or not.
+ - This parameter is ignored if E(PYTHONHTTPSVERIFY) environment variable is used.
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - Time to wait for the desired state to be reached before timeout, in seconds.
+ type: int
+ default: 300
+"""
diff --git a/plugins/doc_fragments/openswitch.py b/plugins/doc_fragments/openswitch.py
index 317ec904e5..aac90e020f 100644
--- a/plugins/doc_fragments/openswitch.py
+++ b/plugins/doc_fragments/openswitch.py
@@ -1,85 +1,69 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2015, Peter Sprygada
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard files documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
host:
description:
- - Specifies the DNS host name or address for connecting to the remote
- device over the specified transport. The value of host is used as
- the destination address for the transport. Note this argument
- does not affect the SSH argument.
+ - Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value
+ of host is used as the destination address for the transport. Note this argument does not affect the SSH argument.
type: str
port:
description:
- - Specifies the port to use when building the connection to the remote
- device. This value applies to either I(cli) or I(rest). The port
- value will default to the appropriate transport common port if
- none is provided in the task. (cli=22, http=80, https=443). Note
- this argument does not affect the SSH transport.
+ - Specifies the port to use when building the connection to the remote device. This value applies to either O(transport=cli)
+ or O(transport=rest). The port value defaults to the appropriate transport common port if none is provided in the
+ task. (cli=22, http=80, https=443). Note this argument does not affect the SSH transport.
type: int
default: 0 (use common port)
username:
description:
- - Configures the username to use to authenticate the connection to
- the remote device. This value is used to authenticate
- either the CLI login or the eAPI authentication depending on which
- transport is used. Note this argument does not affect the SSH
- transport. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
+ - Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate
+ either the CLI login or the eAPI authentication depending on which transport is used. Note this argument does not
+ affect the SSH transport. If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_USERNAME)
+ is used instead.
type: str
password:
description:
- - Specifies the password to use to authenticate the connection to
- the remote device. This is a common argument used for either I(cli)
- or I(rest) transports. Note this argument does not affect the SSH
- transport. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
+ - Specifies the password to use to authenticate the connection to the remote device. This is a common argument used
+ for either O(transport=cli) or O(transport=rest). Note this argument does not affect the SSH transport. If the value
+ is not specified in the task, the value of environment variable E(ANSIBLE_NET_PASSWORD) is used instead.
type: str
timeout:
description:
- - Specifies the timeout in seconds for communicating with the network device
- for either connecting or sending commands. If the timeout is
- exceeded before the operation is completed, the module will error.
+ - Specifies the timeout in seconds for communicating with the network device for either connecting or sending commands.
+ If the timeout is exceeded before the operation is completed, the module fails.
type: int
default: 10
ssh_keyfile:
description:
- - Specifies the SSH key to use to authenticate the connection to
- the remote device. This argument is only used for the I(cli)
- transports. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
+ - Specifies the SSH key to use to authenticate the connection to the remote device. This argument is only used for O(transport=cli).
+ If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_SSH_KEYFILE) is used instead.
type: path
transport:
description:
- - Configures the transport connection to use when connecting to the
- remote device. The transport argument supports connectivity to the
- device over ssh, cli or REST.
+ - Configures the transport connection to use when connecting to the remote device. The transport argument supports connectivity
+ to the device over SSH (V(ssh)), CLI (V(cli)), or REST (V(rest)).
required: true
type: str
- choices: [ cli, rest, ssh ]
+ choices: [cli, rest, ssh]
default: ssh
use_ssl:
description:
- - Configures the I(transport) to use SSL if set to C(yes) only when the
- I(transport) argument is configured as rest. If the transport
- argument is not I(rest), this value is ignored.
+ - Configures the O(transport) to use SSL if set to V(true) only when the O(transport) argument is configured as rest.
+ If the transport argument is not V(rest), this value is ignored.
type: bool
- default: yes
+ default: true
provider:
description:
- - Convenience method that allows all I(openswitch) arguments to be passed as
- a dict object. All constraints (required, choices, etc) must be
- met either by individual arguments or values in this dict.
+ - Convenience method that allows all C(openswitch) arguments to be passed as a dict object. All constraints (required,
+ choices, and so on) must be met either by individual arguments or values in this dict.
type: dict
-'''
+"""
diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py
index 9ca4706baa..05120f7aa3 100644
--- a/plugins/doc_fragments/oracle.py
+++ b/plugins/doc_fragments/oracle.py
@@ -1,84 +1,80 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#
class ModuleDocFragment(object):
- DOCUMENTATION = """
- requirements:
- - "python >= 2.7"
- - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io)
- notes:
- - For OCI python sdk configuration, please refer to
- U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html)
- options:
- config_file_location:
- description:
- - Path to configuration file. If not set then the value of the OCI_CONFIG_FILE environment variable,
- if any, is used. Otherwise, defaults to ~/.oci/config.
- type: str
- config_profile_name:
- description:
- - The profile to load from the config file referenced by C(config_file_location). If not set, then the
- value of the OCI_CONFIG_PROFILE environment variable, if any, is used. Otherwise, defaults to the
- "DEFAULT" profile in C(config_file_location).
- default: "DEFAULT"
- type: str
- api_user:
- description:
- - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the
- value of the OCI_USER_OCID environment variable, if any, is used. This option is required if the user
- is not specified through a configuration file (See C(config_file_location)). To get the user's OCID,
- please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
- type: str
- api_user_fingerprint:
- description:
- - Fingerprint for the key pair being used. If not set, then the value of the OCI_USER_FINGERPRINT
- environment variable, if any, is used. This option is required if the key fingerprint is not
- specified through a configuration file (See C(config_file_location)). To get the key pair's
- fingerprint value please refer
- U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
- type: str
- api_user_key_file:
- description:
- - Full path and filename of the private key (in PEM format). If not set, then the value of the
- OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
- not specified through a configuration file (See C(config_file_location)). If the key is encrypted
- with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
- type: path
- api_user_key_pass_phrase:
- description:
- - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then
- the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the
- key passphrase is not specified through a configuration file (See C(config_file_location)).
- type: str
- auth_type:
- description:
- - The type of authentication to use for making API requests. By default C(auth_type="api_key") based
- authentication is performed and the API key (see I(api_user_key_file)) in your config file will be
- used. If this 'auth_type' module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE,
- if any, is used. Use C(auth_type="instance_principal") to use instance principal based authentication
- when running ansible playbooks within an OCI compute instance.
- choices: ['api_key', 'instance_principal']
- default: 'api_key'
- type: str
- tenancy:
- description:
- - OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is
- used. This option is required if the tenancy OCID is not specified through a configuration file
- (See C(config_file_location)). To get the tenancy OCID, please refer
- U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm)
- type: str
- region:
- description:
- - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the
- value of the OCI_REGION variable, if any, is used. This option is required if the region is
- not specified through a configuration file (See C(config_file_location)). Please refer to
- U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information
- on OCI regions.
- type: str
- """
+ DOCUMENTATION = r"""
+requirements:
+ - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io)
+notes:
+ - For OCI Python SDK configuration, please refer to U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html).
+options:
+ config_file_location:
+ description:
+ - Path to configuration file. If not set then the value of the E(OCI_CONFIG_FILE) environment variable, if any, is used.
+ Otherwise, defaults to C(~/.oci/config).
+ type: str
+ config_profile_name:
+ description:
+ - The profile to load from the config file referenced by O(config_file_location). If not set, then the value of the
+ E(OCI_CONFIG_PROFILE) environment variable, if any, is used. Otherwise, defaults to the C(DEFAULT) profile in O(config_file_location).
+ default: "DEFAULT"
+ type: str
+ api_user:
+ description:
+ - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the value of the E(OCI_USER_OCID) environment
+ variable, if any, is used. This option is required if the user is not specified through a configuration file (See
+ O(config_file_location)). To get the user's OCID, please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+ type: str
+ api_user_fingerprint:
+ description:
+ - Fingerprint for the key pair being used. If not set, then the value of the E(OCI_USER_FINGERPRINT) environment variable,
+ if any, is used. This option is required if the key fingerprint is not specified through a configuration file (See
+ O(config_file_location)). To get the key pair's fingerprint value please refer to
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+ type: str
+ api_user_key_file:
+ description:
+ - Full path and filename of the private key (in PEM format). If not set, then the value of the E(OCI_USER_KEY_FILE)
+ variable, if any, is used. This option is required if the private key is not specified through a configuration file
+ (See O(config_file_location)). If the key is encrypted with a pass-phrase, the O(api_user_key_pass_phrase) option
+ must also be provided.
+ type: path
+ api_user_key_pass_phrase:
+ description:
+ - Passphrase used by the key referenced in O(api_user_key_file), if it is encrypted. If not set, then the value of the
+ E(OCI_USER_KEY_PASS_PHRASE) variable, if any, is used. This option is required if the key passphrase is not specified
+ through a configuration file (See O(config_file_location)).
+ type: str
+ auth_type:
+ description:
+ - The type of authentication to use for making API requests. By default O(auth_type=api_key) based authentication is
+ performed and the API key (see O(api_user_key_file)) in your config file is used. If O(auth_type) is not specified,
+ the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal) to use instance principal
+ based authentication when running Ansible playbooks within an OCI compute instance.
+ choices: ['api_key', 'instance_principal']
+ default: 'api_key'
+ type: str
+ tenancy:
+ description:
+ - OCID of your tenancy. If not set, then the value of the E(OCI_TENANCY) variable, if any, is used. This option is required
+ if the tenancy OCID is not specified through a configuration file (See O(config_file_location)). To get the tenancy
+ OCID, please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+ type: str
+ region:
+ description:
+ - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the value of the E(OCI_REGION)
+ variable, if any, is used. This option is required if the region is not specified through a configuration file (See
+ O(config_file_location)). Please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm)
+ for more information on OCI regions.
+ type: str
+"""
diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py
index 7c1551ec06..1728e56d81 100644
--- a/plugins/doc_fragments/oracle_creatable_resource.py
+++ b/plugins/doc_fragments/oracle_creatable_resource.py
@@ -1,26 +1,29 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#
class ModuleDocFragment(object):
- DOCUMENTATION = """
- options:
- force_create:
- description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an
- idempotent operation, and doesn't create the resource if it already exists. Setting this option
- to true, forcefully creates a copy of the resource, even if it already exists.This option is
- mutually exclusive with I(key_by).
- default: False
- type: bool
- key_by:
- description: The list of comma-separated attributes of this resource which should be used to uniquely
- identify an instance of the resource. By default, all the attributes of a resource except
- I(freeform_tags) are used to uniquely identify a resource.
- type: list
- elements: str
- """
+ DOCUMENTATION = r"""
+options:
+ force_create:
+ description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an idempotent operation,
+ and does not create the resource if it already exists. Setting this option to V(true), forcefully creates a copy of
+ the resource, even if it already exists. This option is mutually exclusive with O(key_by).
+ default: false
+ type: bool
+ key_by:
+ description: The list of comma-separated attributes of this resource which should be used to uniquely identify an instance
+ of the resource. By default, all the attributes of a resource except O(freeform_tags) are used to uniquely identify
+ a resource.
+ type: list
+ elements: str
+"""
diff --git a/plugins/doc_fragments/oracle_display_name_option.py b/plugins/doc_fragments/oracle_display_name_option.py
index eae5f44593..1ac210bbd4 100644
--- a/plugins/doc_fragments/oracle_display_name_option.py
+++ b/plugins/doc_fragments/oracle_display_name_option.py
@@ -1,17 +1,21 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#
class ModuleDocFragment(object):
- DOCUMENTATION = """
- options:
- display_name:
- description: Use I(display_name) along with the other options to return only resources that match the given
- display name exactly.
- type: str
- """
+ DOCUMENTATION = r"""
+options:
+ display_name:
+ description: Use O(display_name) along with the other options to return only resources that match the given display name
+ exactly.
+ type: str
+"""
diff --git a/plugins/doc_fragments/oracle_name_option.py b/plugins/doc_fragments/oracle_name_option.py
index 362071f946..a281bc5e68 100644
--- a/plugins/doc_fragments/oracle_name_option.py
+++ b/plugins/doc_fragments/oracle_name_option.py
@@ -1,17 +1,20 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#
class ModuleDocFragment(object):
- DOCUMENTATION = """
- options:
- name:
- description: Use I(name) along with the other options to return only resources that match the given name
- exactly.
- type: str
- """
+ DOCUMENTATION = r"""
+options:
+ name:
+ description: Use O(name) along with the other options to return only resources that match the given name exactly.
+ type: str
+"""
diff --git a/plugins/doc_fragments/oracle_tags.py b/plugins/doc_fragments/oracle_tags.py
index 3789dbe912..ec0096ba33 100644
--- a/plugins/doc_fragments/oracle_tags.py
+++ b/plugins/doc_fragments/oracle_tags.py
@@ -1,23 +1,25 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#
class ModuleDocFragment(object):
- DOCUMENTATION = """
- options:
- defined_tags:
- description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more
- information, see
- U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
- type: dict
- freeform_tags:
- description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name,
- type, or namespace. For more information, see
- U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
- type: dict
- """
+ DOCUMENTATION = r"""
+options:
+ defined_tags:
+ description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+ type: dict
+ freeform_tags:
+ description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
+ For more information, see U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+ type: dict
+"""
diff --git a/plugins/doc_fragments/oracle_wait_options.py b/plugins/doc_fragments/oracle_wait_options.py
index 6ca2a8c033..868fb3cb04 100644
--- a/plugins/doc_fragments/oracle_wait_options.py
+++ b/plugins/doc_fragments/oracle_wait_options.py
@@ -1,27 +1,30 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Oracle and/or its affiliates.
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
+
+#
+# DEPRECATED
+#
+# This fragment is deprecated and will be removed in community.general 13.0.0
+#
class ModuleDocFragment(object):
- DOCUMENTATION = """
- options:
- wait:
- description: Whether to wait for create or delete operation to complete.
- default: yes
- type: bool
- wait_timeout:
- description: Time, in seconds, to wait when I(wait=yes).
- default: 1200
- type: int
- wait_until:
- description: The lifecycle state to wait for the resource to transition into when I(wait=yes). By default,
- when I(wait=yes), we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/
- RUNNING applicable lifecycle state during create operation & to get into DELETED/DETACHED/
- TERMINATED lifecycle state during delete operation.
- type: str
- """
+ DOCUMENTATION = r"""
+options:
+ wait:
+ description: Whether to wait for create or delete operation to complete.
+ default: true
+ type: bool
+ wait_timeout:
+ description: Time, in seconds, to wait when O(wait=true).
+ default: 1200
+ type: int
+ wait_until:
+ description: The lifecycle state to wait for the resource to transition into when O(wait=true). By default, when O(wait=true),
+ we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/RUNNING applicable lifecycle state during
+ create operation and to get into DELETED/DETACHED/TERMINATED lifecycle state during delete operation.
+ type: str
+"""
diff --git a/plugins/doc_fragments/pipx.py b/plugins/doc_fragments/pipx.py
new file mode 100644
index 0000000000..70a502ddda
--- /dev/null
+++ b/plugins/doc_fragments/pipx.py
@@ -0,0 +1,40 @@
+
+# Copyright (c) 2024, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = r"""
+options:
+ global:
+ description:
+ - The module passes the C(--global) argument to C(pipx), to execute actions in global scope.
+ type: bool
+ default: false
+ executable:
+ description:
+ - Path to the C(pipx) installed in the system.
+ - If not specified, the module uses C(python -m pipx) to run the tool, using the same Python interpreter as ansible
+ itself.
+ type: path
+requirements:
+ - This module requires C(pipx) version 1.7.0 or above.
+ - Please note that C(pipx) 1.7.0 requires Python 3.8 or above.
+ - Please note that C(pipx) 1.8.0 requires Python 3.9 or above.
+notes:
+ - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip).
+ - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module, meaning
+ that C(python -m pipx) must work.
+ - This module honors C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed using
+ the R(environment Ansible keyword, playbooks_environment).
+ - This module disables emojis in the output of C(pipx) commands to reduce clutter. In C(pipx) 1.8.0, the environment variable
+ E(USE_EMOJI) was renamed to E(PIPX_USE_EMOJI) and for compatibility with both versions, starting in community.general
+ 11.4.0, this module sets them both to C(0) to disable emojis.
+seealso:
+ - name: C(pipx) command manual page
+ description: Manual page for the command.
+ link: https://pipx.pypa.io/latest/docs/
+"""
diff --git a/plugins/doc_fragments/pritunl.py b/plugins/doc_fragments/pritunl.py
index 51ab979b54..17e03fc716 100644
--- a/plugins/doc_fragments/pritunl.py
+++ b/plugins/doc_fragments/pritunl.py
@@ -1,44 +1,37 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Florian Dambrine
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
DOCUMENTATION = r"""
options:
- pritunl_url:
- type: str
- required: true
- description:
- - URL and port of the Pritunl server on which the API is enabled.
-
- pritunl_api_token:
- type: str
- required: true
- description:
- - API Token of a Pritunl admin user.
- - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication.
-
- pritunl_api_secret:
- type: str
- required: true
- description:
- - API Secret found in Administrators > USERNAME > API Secret.
-
- validate_certs:
- type: bool
- required: false
- default: true
- description:
- - If certificates should be validated or not.
- - This should never be set to C(false), except if you are very sure that
- your connection to the server can not be subject to a Man In The Middle
- attack.
+ pritunl_url:
+ type: str
+ required: true
+ description:
+ - URL and port of the Pritunl server on which the API is enabled.
+ pritunl_api_token:
+ type: str
+ required: true
+ description:
+ - API Token of a Pritunl admin user.
+ - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication.
+ pritunl_api_secret:
+ type: str
+ required: true
+ description:
+ - API Secret found in Administrators > USERNAME > API Secret.
+ validate_certs:
+ type: bool
+ required: false
+ default: true
+ description:
+ - If certificates should be validated or not.
+ - This should never be set to V(false), except if you are very sure that your connection to the server cannot be subject
+ to a Man In The Middle attack.
"""
diff --git a/plugins/doc_fragments/proxmox.py b/plugins/doc_fragments/proxmox.py
deleted file mode 100644
index 50fe6ea0e6..0000000000
--- a/plugins/doc_fragments/proxmox.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Ansible project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- # Common parameters for Proxmox VE modules
- DOCUMENTATION = r'''
-options:
- api_host:
- description:
- - Specify the target host of the Proxmox VE cluster.
- type: str
- required: true
- api_user:
- description:
- - Specify the user to authenticate with.
- type: str
- required: true
- api_password:
- description:
- - Specify the password to authenticate with.
- - You can use C(PROXMOX_PASSWORD) environment variable.
- type: str
- api_token_id:
- description:
- - Specify the token ID.
- type: str
- version_added: 1.3.0
- api_token_secret:
- description:
- - Specify the token secret.
- type: str
- version_added: 1.3.0
- validate_certs:
- description:
- - If C(no), SSL certificates will not be validated.
- - This should only be used on personally controlled sites using self-signed certificates.
- type: bool
- default: no
-requirements: [ "proxmoxer", "requests" ]
-'''
-
- SELECTION = r'''
-options:
- vmid:
- description:
- - Specifies the instance ID.
- - If not set the next available ID will be fetched from ProxmoxAPI.
- type: int
- node:
- description:
- - Proxmox VE node on which to operate.
- - Only required for I(state=present).
- - For every other states it will be autodiscovered.
- type: str
- pool:
- description:
- - Add the new VM to the specified pool.
- type: str
-'''
diff --git a/plugins/doc_fragments/purestorage.py b/plugins/doc_fragments/purestorage.py
deleted file mode 100644
index 8db8c3b3da..0000000000
--- a/plugins/doc_fragments/purestorage.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2017, Simon Dodsley
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard Pure Storage documentation fragment
- DOCUMENTATION = r'''
-options:
- - See separate platform section for more details
-requirements:
- - See separate platform section for more details
-notes:
- - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade
-'''
-
- # Documentation fragment for FlashBlade
- FB = r'''
-options:
- fb_url:
- description:
- - FlashBlade management IP address or Hostname.
- type: str
- api_token:
- description:
- - FlashBlade API token for admin privileged user.
- type: str
-notes:
- - This module requires the C(purity_fb) Python library
- - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables
- if I(fb_url) and I(api_token) arguments are not passed to the module directly
-requirements:
- - python >= 2.7
- - purity_fb >= 1.1
-'''
-
- # Documentation fragment for FlashArray
- FA = r'''
-options:
- fa_url:
- description:
- - FlashArray management IPv4 address or Hostname.
- type: str
- required: true
- api_token:
- description:
- - FlashArray API token for admin privileged user.
- type: str
- required: true
-notes:
- - This module requires the C(purestorage) Python library
- - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables
- if I(fa_url) and I(api_token) arguments are not passed to the module directly
-requirements:
- - python >= 2.7
- - purestorage
-'''
diff --git a/plugins/doc_fragments/rackspace.py b/plugins/doc_fragments/rackspace.py
deleted file mode 100644
index 9e22316022..0000000000
--- a/plugins/doc_fragments/rackspace.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2014, Matt Martz
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard Rackspace only documentation fragment
- DOCUMENTATION = r'''
-options:
- api_key:
- description:
- - Rackspace API key, overrides I(credentials).
- type: str
- aliases: [ password ]
- credentials:
- description:
- - File to find the Rackspace credentials in. Ignored if I(api_key) and
- I(username) are provided.
- type: path
- aliases: [ creds_file ]
- env:
- description:
- - Environment as configured in I(~/.pyrax.cfg),
- see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
- type: str
- region:
- description:
- - Region to create an instance in.
- type: str
- username:
- description:
- - Rackspace username, overrides I(credentials).
- type: str
- validate_certs:
- description:
- - Whether or not to require SSL validation of API endpoints.
- type: bool
- aliases: [ verify_ssl ]
-requirements:
- - python >= 2.6
- - pyrax
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
-'''
-
- # Documentation fragment including attributes to enable communication
- # of other OpenStack clouds. Not all rax modules support this.
- OPENSTACK = r'''
-options:
- api_key:
- type: str
- description:
- - Rackspace API key, overrides I(credentials).
- aliases: [ password ]
- auth_endpoint:
- type: str
- description:
- - The URI of the authentication service.
- - If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/)
- credentials:
- type: path
- description:
- - File to find the Rackspace credentials in. Ignored if I(api_key) and
- I(username) are provided.
- aliases: [ creds_file ]
- env:
- type: str
- description:
- - Environment as configured in I(~/.pyrax.cfg),
- see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
- identity_type:
- type: str
- description:
- - Authentication mechanism to use, such as rackspace or keystone.
- default: rackspace
- region:
- type: str
- description:
- - Region to create an instance in.
- tenant_id:
- type: str
- description:
- - The tenant ID used for authentication.
- tenant_name:
- type: str
- description:
- - The tenant name used for authentication.
- username:
- type: str
- description:
- - Rackspace username, overrides I(credentials).
- validate_certs:
- description:
- - Whether or not to require SSL validation of API endpoints.
- type: bool
- aliases: [ verify_ssl ]
-requirements:
- - python >= 2.6
- - pyrax
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
-'''
diff --git a/plugins/doc_fragments/redfish.py b/plugins/doc_fragments/redfish.py
new file mode 100644
index 0000000000..ed95eeab83
--- /dev/null
+++ b/plugins/doc_fragments/redfish.py
@@ -0,0 +1,35 @@
+
+# Copyright (c) 2025 Ansible community
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+class ModuleDocFragment(object):
+
+ # Use together with the community.general.redfish module utils' REDFISH_COMMON_ARGUMENT_SPEC
+ DOCUMENTATION = r"""
+options:
+ validate_certs:
+ description:
+ - If V(false), TLS/SSL certificates are not validated.
+ - Set this to V(true) to enable certificate checking. Should be used together with O(ca_path).
+ type: bool
+ default: false
+ ca_path:
+ description:
+ - PEM formatted file that contains a CA certificate to be used for validation.
+ - Only used if O(validate_certs=true).
+ type: path
+ ciphers:
+ required: false
+ description:
+ - TLS/SSL Ciphers to use for the request.
+ - When a list is provided, all ciphers are joined in order with V(:).
+ - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT)
+ for more details.
+ - The available ciphers are dependent on the Python and OpenSSL/LibreSSL versions.
+ type: list
+ elements: str
+"""
diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py
index 2d40330519..38889a3cbd 100644
--- a/plugins/doc_fragments/redis.py
+++ b/plugins/doc_fragments/redis.py
@@ -1,16 +1,14 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Andreas Botzner
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Common parameters for Redis modules
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
login_host:
description:
@@ -40,19 +38,26 @@ options:
validate_certs:
description:
- Specify whether or not to validate TLS certificates.
- - This should only be turned off for personally controlled sites or with
- C(localhost) as target.
+ - This should only be turned off for personally controlled sites or with C(localhost) as target.
type: bool
default: true
ca_certs:
description:
- - Path to root certificates file. If not set and I(tls) is
- set to C(true), certifi ca-certificates will be used.
+ - Path to root certificates file. If not set and O(tls) is set to V(true), certifi's CA certificates are used.
type: str
-requirements: [ "redis", "certifi" ]
+ client_cert_file:
+ description:
+ - Path to the client certificate file.
+ type: str
+ version_added: 9.3.0
+ client_key_file:
+ description:
+ - Path to the client private key file.
+ type: str
+ version_added: 9.3.0
+requirements: ["redis", "certifi"]
notes:
- - Requires the C(redis) Python package on the remote host. You can
- install it with pip (C(pip install redis)) or with a package manager.
- Information on the library can be found at U(https://github.com/andymccurdy/redis-py).
-'''
+ - Requires the C(redis) Python package on the remote host. You can install it with pip (C(pip install redis)) or with a
+ package manager. Information on the library can be found at U(https://github.com/andymccurdy/redis-py).
+"""
diff --git a/plugins/doc_fragments/rundeck.py b/plugins/doc_fragments/rundeck.py
index 62c8648e96..3e9d99aa7a 100644
--- a/plugins/doc_fragments/rundeck.py
+++ b/plugins/doc_fragments/rundeck.py
@@ -1,17 +1,15 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Phillipe Smith
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard files documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
url:
type: str
@@ -29,4 +27,4 @@ options:
description:
- Rundeck User API Token.
required: true
-'''
+"""
diff --git a/plugins/doc_fragments/scaleway.py b/plugins/doc_fragments/scaleway.py
index 187288fbf8..7810deb901 100644
--- a/plugins/doc_fragments/scaleway.py
+++ b/plugins/doc_fragments/scaleway.py
@@ -1,51 +1,57 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Yanis Guenane
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Standard documentation fragment
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
api_token:
description:
- Scaleway OAuth token.
type: str
required: true
- aliases: [ oauth_token ]
+ aliases: [oauth_token]
api_url:
description:
- Scaleway API URL.
type: str
default: https://api.scaleway.com
- aliases: [ base_url ]
+ aliases: [base_url]
api_timeout:
description:
- HTTP timeout to Scaleway API in seconds.
type: int
default: 30
- aliases: [ timeout ]
+ aliases: [timeout]
query_parameters:
description:
- - List of parameters passed to the query string.
+ - List of parameters passed to the query string.
type: dict
default: {}
validate_certs:
description:
- Validate SSL certs of the Scaleway API.
type: bool
- default: yes
+ default: true
notes:
- - Also see the API documentation on U(https://developer.scaleway.com/)
- - If C(api_token) is not set within the module, the following
- environment variables can be used in decreasing order of precedence
- C(SCW_TOKEN), C(SCW_API_KEY), C(SCW_OAUTH_TOKEN) or C(SCW_API_TOKEN).
- - If one wants to use a different C(api_url) one can also set the C(SCW_API_URL)
- environment variable.
-'''
+ - Also see the API documentation on U(https://developer.scaleway.com/).
+ - If O(api_token) is not set within the module, the following environment variables can be used in decreasing order of precedence:
+ E(SCW_TOKEN), E(SCW_API_KEY), E(SCW_OAUTH_TOKEN) or E(SCW_API_TOKEN).
+ - If one wants to use a different O(api_url) one can also set the E(SCW_API_URL) environment variable.
+"""
+
+ ACTIONGROUP_SCALEWAY = r"""
+options: {}
+attributes:
+ action_group:
+ description: Use C(group/community.general.scaleway) in C(module_defaults) to set defaults for this module.
+ support: full
+ membership:
+ - community.general.scaleway
+"""
diff --git a/plugins/doc_fragments/scaleway_waitable_resource.py b/plugins/doc_fragments/scaleway_waitable_resource.py
new file mode 100644
index 0000000000..2a14c7571e
--- /dev/null
+++ b/plugins/doc_fragments/scaleway_waitable_resource.py
@@ -0,0 +1,31 @@
+
+# Copyright (c) 2022, Guillaume MARTINEZ
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r"""
+options:
+ wait:
+ description:
+ - Wait for the resource to reach its desired state before returning.
+ type: bool
+ default: true
+ wait_timeout:
+ type: int
+ description:
+ - Time to wait for the resource to reach the expected state.
+ required: false
+ default: 300
+ wait_sleep_time:
+ type: int
+ description:
+ - Time to wait before every attempt to check the state of the resource.
+ required: false
+ default: 3
+"""
diff --git a/plugins/doc_fragments/utm.py b/plugins/doc_fragments/utm.py
index 6700ac5320..831f4ccc96 100644
--- a/plugins/doc_fragments/utm.py
+++ b/plugins/doc_fragments/utm.py
@@ -1,55 +1,55 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Johannes Brunswicker
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
- headers:
- description:
- - A dictionary of additional headers to be sent to POST and PUT requests.
- - Is needed for some modules
- type: dict
- required: false
- utm_host:
- description:
- - The REST Endpoint of the Sophos UTM.
- type: str
- required: true
- utm_port:
- description:
- - The port of the REST interface.
- type: int
- default: 4444
- utm_token:
- description:
- - "The token used to identify at the REST-API. See U(https://www.sophos.com/en-us/medialibrary/\
- PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2."
- type: str
- required: true
- utm_protocol:
- description:
- - The protocol of the REST Endpoint.
- choices: [ http, https ]
- type: str
- default: https
- validate_certs:
- description:
- - Whether the REST interface's ssl certificate should be verified or not.
- type: bool
- default: yes
- state:
- description:
- - The desired state of the object.
- - C(present) will create or update an object
- - C(absent) will delete an object if it was present
- type: str
- choices: [ absent, present ]
- default: present
-'''
+ headers:
+ description:
+ - A dictionary of additional headers to be sent to POST and PUT requests.
+ - Is needed for some modules.
+ type: dict
+ required: false
+ default: {}
+ utm_host:
+ description:
+ - The REST Endpoint of the Sophos UTM.
+ type: str
+ required: true
+ utm_port:
+ description:
+ - The port of the REST interface.
+ type: int
+ default: 4444
+ utm_token:
+ description:
+ - The token used to identify at the REST-API.
+ - See U(https://www.sophos.com/en-us/medialibrary/PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter
+ 2.4.2.
+ type: str
+ required: true
+ utm_protocol:
+ description:
+ - The protocol of the REST Endpoint.
+ choices: [http, https]
+ type: str
+ default: https
+ validate_certs:
+ description:
+ - Whether the REST interface's SSL certificate should be verified or not.
+ type: bool
+ default: true
+ state:
+ description:
+ - The desired state of the object.
+ - V(present) creates or updates an object.
+ - V(absent) deletes an object if present.
+ type: str
+ choices: [absent, present]
+ default: present
+"""
diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py
index 31e2f24f74..3ca6684469 100644
--- a/plugins/doc_fragments/vexata.py
+++ b/plugins/doc_fragments/vexata.py
@@ -1,24 +1,13 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2019, Sandeep Kasargod
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
- DOCUMENTATION = r'''
-options:
- - See respective platform section for more details
-requirements:
- - See respective platform section for more details
-notes:
- - Ansible modules are available for Vexata VX100 arrays.
-'''
-
# Documentation fragment for Vexata VX100 series
VX100 = r'''
options:
@@ -30,25 +19,26 @@ options:
user:
description:
- Vexata API user with administrative privileges.
+ - Uses the E(VEXATA_USER) environment variable as a fallback.
required: false
type: str
password:
description:
- Vexata API user password.
+ - Uses the E(VEXATA_PASSWORD) environment variable as a fallback.
required: false
type: str
validate_certs:
description:
- - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
- - If set to C(yes), please make sure Python >= 2.7.9 is installed on the given machine.
+ - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted.
+ - If set to V(true), please make sure Python >= 2.7.9 is installed on the given machine.
required: false
type: bool
- default: 'no'
+ default: false
requirements:
- Vexata VX100 storage array with VXOS >= v3.5.0 on storage array
- vexatapi >= 0.0.1
- - python >= 2.7
- - VEXATA_USER and VEXATA_PASSWORD environment variables must be set if
+ - E(VEXATA_USER) and E(VEXATA_PASSWORD) environment variables must be set if
user and password arguments are not passed to the module directly.
'''
diff --git a/plugins/doc_fragments/xenserver.py b/plugins/doc_fragments/xenserver.py
index 66522fcf4c..7da1391420 100644
--- a/plugins/doc_fragments/xenserver.py
+++ b/plugins/doc_fragments/xenserver.py
@@ -1,41 +1,39 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Bojan Vitnik
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
class ModuleDocFragment(object):
# Common parameters for XenServer modules
- DOCUMENTATION = r'''
+ DOCUMENTATION = r"""
options:
hostname:
description:
- - The hostname or IP address of the XenServer host or XenServer pool master.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_HOST) will be used instead.
+ - The hostname or IP address of the XenServer host or XenServer pool master.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) is used instead.
type: str
default: localhost
- aliases: [ host, pool ]
+ aliases: [host, pool]
username:
description:
- - The username to use for connecting to XenServer.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_USER) will be used instead.
+ - The username to use for connecting to XenServer.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) is used instead.
type: str
default: root
- aliases: [ admin, user ]
+ aliases: [admin, user]
password:
description:
- - The password to use for connecting to XenServer.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_PASSWORD) will be used instead.
+ - The password to use for connecting to XenServer.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) is used instead.
type: str
- aliases: [ pass, pwd ]
+ aliases: [pass, pwd]
validate_certs:
description:
- - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_VALIDATE_CERTS) will be used instead.
+ - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) is used instead.
type: bool
- default: yes
-'''
+ default: true
+"""
diff --git a/plugins/filter/accumulate.py b/plugins/filter/accumulate.py
new file mode 100644
index 0000000000..da784ab12b
--- /dev/null
+++ b/plugins/filter/accumulate.py
@@ -0,0 +1,62 @@
+# Copyright (c) Max Gautier
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION = r"""
+name: accumulate
+short_description: Produce a list of accumulated sums of the input list contents
+version_added: 10.1.0
+author: Max Gautier (@VannTen)
+description:
+ - Passthrough to the L(Python itertools.accumulate function,https://docs.python.org/3/library/itertools.html#itertools.accumulate).
+ - Transforms an input list into the cumulative list of results from applying addition to the elements of the input list.
+ - Addition means the default Python implementation of C(+) for input list elements type.
+options:
+ _input:
+ description: A list.
+ type: list
+ elements: any
+ required: true
+"""
+
+RETURN = r"""
+_value:
+ description: A list of accumulated sums of the elements of the input list.
+ type: list
+ elements: any
+"""
+
+EXAMPLES = r"""
+- name: Enumerate parent directories of some path
+ ansible.builtin.debug:
+ var: >
+ "/some/path/to/my/file"
+ | split('/') | map('split', '/')
+ | community.general.accumulate | map('join', '/')
+ # Produces: ['', '/some', '/some/path', '/some/path/to', '/some/path/to/my', '/some/path/to/my/file']
+
+- name: Growing string
+ ansible.builtin.debug:
+ var: "'abc' | community.general.accumulate"
+ # Produces ['a', 'ab', 'abc']
+"""
+
+from itertools import accumulate
+from collections.abc import Sequence
+
+from ansible.errors import AnsibleFilterError
+
+
+def list_accumulate(sequence):
+ if not isinstance(sequence, Sequence):
+ raise AnsibleFilterError(f'Invalid value type ({type(sequence)}) for accumulate ({sequence!r})')
+
+ return accumulate(sequence)
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'accumulate': list_accumulate,
+ }
diff --git a/plugins/filter/counter.py b/plugins/filter/counter.py
index 1b79294b59..f89bfd6d1a 100644
--- a/plugins/filter/counter.py
+++ b/plugins/filter/counter.py
@@ -1,56 +1,54 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Remy Keil
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: counter
- short_description: Counts hashable elements in a sequence
- version_added: 4.3.0
- author: Rémy Keil (@keilr)
- description:
- - Counts hashable elements in a sequence.
- options:
- _input:
- description: A sequence.
- type: list
- elements: any
- required: true
-'''
+DOCUMENTATION = r"""
+name: counter
+short_description: Counts hashable elements in a sequence
+version_added: 4.3.0
+author: Rémy Keil (@keilr)
+description:
+ - Counts hashable elements in a sequence.
+options:
+ _input:
+ description: A sequence.
+ type: list
+ elements: any
+ required: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Count occurrences
ansible.builtin.debug:
msg: >-
{{ [1, 'a', 2, 2, 'a', 'b', 'a'] | community.general.counter }}
# Produces: {1: 1, 'a': 3, 2: 2, 'b': 1}
-'''
+"""
-RETURN = '''
- _value:
- description: A dictionary with the elements of the sequence as keys, and their number of occurrences in the sequence as values.
- type: dictionary
-'''
+RETURN = r"""
+_value:
+ description: A dictionary with the elements of the sequence as keys, and their number of occurrences in the sequence as
+ values.
+ type: dictionary
+"""
from ansible.errors import AnsibleFilterError
-from ansible.module_utils.common._collections_compat import Sequence
+from collections.abc import Sequence
from collections import Counter
def counter(sequence):
''' Count elements in a sequence. Returns dict with count result. '''
if not isinstance(sequence, Sequence):
- raise AnsibleFilterError('Argument for community.general.counter must be a sequence (string or list). %s is %s' %
- (sequence, type(sequence)))
+ raise AnsibleFilterError(f'Argument for community.general.counter must be a sequence (string or list). {sequence} is {type(sequence)}')
try:
result = dict(Counter(sequence))
except TypeError as e:
raise AnsibleFilterError(
- "community.general.counter needs a sequence with hashable elements (int, float or str) - %s" % (e)
+ f"community.general.counter needs a sequence with hashable elements (int, float or str) - {e}"
)
return result
diff --git a/plugins/filter/crc32.py b/plugins/filter/crc32.py
index 1f0aa2e9b0..11a6e77495 100644
--- a/plugins/filter/crc32.py
+++ b/plugins/filter/crc32.py
@@ -1,9 +1,7 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2022, Julien Riou
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
from ansible.errors import AnsibleFilterError
from ansible.module_utils.common.text.converters import to_bytes
@@ -16,45 +14,44 @@ except ImportError:
HAS_ZLIB = False
-DOCUMENTATION = '''
- name: crc32
- short_description: Generate a CRC32 checksum
- version_added: 5.4.0
- description:
- - Checksum a string using CRC32 algorithm and return its hexadecimal representation.
- options:
- _input:
- description:
- - The string to checksum.
- type: string
- required: true
- author:
- - Julien Riou
-'''
-
-EXAMPLES = '''
- - name: Checksum a test string
- ansible.builtin.debug:
- msg: "{{ 'test' | community.general.crc32 }}"
-'''
-
-RETURN = '''
- _value:
- description: CRC32 checksum.
+DOCUMENTATION = r"""
+name: crc32
+short_description: Generate a CRC32 checksum
+version_added: 5.4.0
+description:
+ - Checksum a string using CRC32 algorithm and return its hexadecimal representation.
+options:
+ _input:
+ description:
+ - The string to checksum.
type: string
-'''
+ required: true
+author:
+ - Julien Riou
+"""
+
+EXAMPLES = r"""
+- name: Checksum a test string
+ ansible.builtin.debug:
+ msg: "{{ 'test' | community.general.crc32 }}"
+"""
+
+RETURN = r"""
+_value:
+ description: CRC32 checksum.
+ type: string
+"""
def crc32s(value):
if not is_string(value):
- raise AnsibleFilterError('Invalid value type (%s) for crc32 (%r)' %
- (type(value), value))
+ raise AnsibleFilterError(f'Invalid value type ({type(value)}) for crc32 ({value!r})')
if not HAS_ZLIB:
raise AnsibleFilterError('Failed to import zlib module')
data = to_bytes(value, errors='surrogate_or_strict')
- return "{0:x}".format(crc32(data) & 0xffffffff)
+ return f"{crc32(data) & 0xffffffff:x}"
class FilterModule:
diff --git a/plugins/filter/dict.py b/plugins/filter/dict.py
index 720c9def96..d2d8bb952c 100644
--- a/plugins/filter/dict.py
+++ b/plugins/filter/dict.py
@@ -1,28 +1,26 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Felix Fontein
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: dict
- short_description: Convert a list of tuples into a dictionary
- version_added: 3.0.0
- author: Felix Fontein (@felixfontein)
- description:
- - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function.
- options:
- _input:
- description: A list of tuples (with exactly two elements).
- type: list
- elements: tuple
- required: true
-'''
+DOCUMENTATION = r"""
+name: dict
+short_description: Convert a list of tuples into a dictionary
+version_added: 3.0.0
+author: Felix Fontein (@felixfontein)
+description:
+ - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function.
+options:
+ _input:
+ description: A list of tuples (with exactly two elements).
+ type: list
+ elements: tuple
+ required: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Convert list of tuples into dictionary
ansible.builtin.set_fact:
dictionary: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}"
@@ -53,13 +51,13 @@ EXAMPLES = '''
# "k2": 42,
# "k3": "b"
# }
-'''
+"""
-RETURN = '''
- _value:
- description: The dictionary having the provided key-value pairs.
- type: boolean
-'''
+RETURN = r"""
+_value:
+ description: A dictionary with the provided key-value pairs.
+ type: dictionary
+"""
def dict_filter(sequence):
diff --git a/plugins/filter/dict_kv.py b/plugins/filter/dict_kv.py
index 59595f9573..79c8dd0fe6 100644
--- a/plugins/filter/dict_kv.py
+++ b/plugins/filter/dict_kv.py
@@ -1,42 +1,40 @@
-# -*- coding: utf-8 -*-
# Copyright (C) 2020 Stanislav German-Evtushenko (@giner)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: dict_kv
- short_description: Convert a value to a dictionary with a single key-value pair
- version_added: 1.3.0
- author: Stanislav German-Evtushenko (@giner)
- description:
- - Convert a value to a dictionary with a single key-value pair.
- positional: key
- options:
- _input:
- description: The value for the single key-value pair.
- type: any
- required: true
- key:
- description: The key for the single key-value pair.
- type: any
- required: true
-'''
+DOCUMENTATION = r"""
+name: dict_kv
+short_description: Convert a value to a dictionary with a single key-value pair
+version_added: 1.3.0
+author: Stanislav German-Evtushenko (@giner)
+description:
+ - Convert a value to a dictionary with a single key-value pair.
+positional: key
+options:
+ _input:
+ description: The value for the single key-value pair.
+ type: any
+ required: true
+ key:
+ description: The key for the single key-value pair.
+ type: any
+ required: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Create a one-element dictionary from a value
ansible.builtin.debug:
msg: "{{ 'myvalue' | dict_kv('mykey') }}"
# Produces the dictionary {'mykey': 'myvalue'}
-'''
+"""
-RETURN = '''
- _value:
- description: A dictionary with a single key-value pair.
- type: dictionary
-'''
+RETURN = r"""
+_value:
+ description: A dictionary with a single key-value pair.
+ type: dictionary
+"""
def dict_kv(value, key):
diff --git a/plugins/filter/from_csv.py b/plugins/filter/from_csv.py
index 6472b67b1a..160eed959e 100644
--- a/plugins/filter/from_csv.py
+++ b/plugins/filter/from_csv.py
@@ -1,62 +1,60 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Andrew Pantuso (@ajpantuso)
# Copyright (c) 2018, Dag Wieers (@dagwieers)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: from_csv
- short_description: Converts CSV text input into list of dicts
- version_added: 2.3.0
- author: Andrew Pantuso (@Ajpantuso)
- description:
- - Converts CSV text input into list of dictionaries.
- options:
- _input:
- description: A string containing a CSV document.
- type: string
- required: true
- dialect:
- description:
- - The CSV dialect to use when parsing the CSV file.
- - Possible values include C(excel), C(excel-tab) or C(unix).
- type: str
- default: excel
- fieldnames:
- description:
- - A list of field names for every column.
- - This is needed if the CSV does not have a header.
- type: list
- elements: str
- delimiter:
- description:
- - A one-character string used to separate fields.
- - When using this parameter, you change the default value used by I(dialect).
- - The default value depends on the dialect used.
- type: str
- skipinitialspace:
- description:
- - Whether to ignore any whitespaces immediately following the delimiter.
- - When using this parameter, you change the default value used by I(dialect).
- - The default value depends on the dialect used.
- type: bool
- strict:
- description:
- - Whether to raise an exception on bad CSV input.
- - When using this parameter, you change the default value used by I(dialect).
- - The default value depends on the dialect used.
- type: bool
-'''
+DOCUMENTATION = r"""
+name: from_csv
+short_description: Converts CSV text input into list of dicts
+version_added: 2.3.0
+author: Andrew Pantuso (@Ajpantuso)
+description:
+ - Converts CSV text input into list of dictionaries.
+options:
+ _input:
+ description: A string containing a CSV document.
+ type: string
+ required: true
+ dialect:
+ description:
+ - The CSV dialect to use when parsing the CSV file.
+ - Possible values include V(excel), V(excel-tab) or V(unix).
+ type: str
+ default: excel
+ fieldnames:
+ description:
+ - A list of field names for every column.
+ - This is needed if the CSV does not have a header.
+ type: list
+ elements: str
+ delimiter:
+ description:
+ - A one-character string used to separate fields.
+ - When using this parameter, you change the default value used by O(dialect).
+ - The default value depends on the dialect used.
+ type: str
+ skipinitialspace:
+ description:
+ - Whether to ignore any whitespaces immediately following the delimiter.
+ - When using this parameter, you change the default value used by O(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+ strict:
+ description:
+ - Whether to raise an exception on bad CSV input.
+ - When using this parameter, you change the default value used by O(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Parse a CSV file's contents
ansible.builtin.debug:
msg: >-
- {{ csv_data | community.genera.from_csv(dialect='unix') }}
+ {{ csv_data | community.general.from_csv(dialect='unix') }}
vars:
csv_data: |
Column 1,Value
@@ -71,17 +69,16 @@ EXAMPLES = '''
# "Column 1": "bar",
# "Value": "42",
# }
-'''
+"""
-RETURN = '''
- _value:
- description: A list with one dictionary per row.
- type: list
- elements: dictionary
-'''
+RETURN = r"""
+_value:
+ description: A list with one dictionary per row.
+ type: list
+ elements: dictionary
+"""
from ansible.errors import AnsibleFilterError
-from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
DialectNotAvailableError,
@@ -99,7 +96,7 @@ def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitial
try:
dialect = initialize_dialect(dialect, **dialect_params)
except (CustomDialectFailureError, DialectNotAvailableError) as e:
- raise AnsibleFilterError(to_native(e))
+ raise AnsibleFilterError(str(e))
reader = read_csv(data, dialect, fieldnames)
@@ -109,7 +106,7 @@ def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitial
for row in reader:
data_list.append(row)
except CSVError as e:
- raise AnsibleFilterError("Unable to process file: %s" % to_native(e))
+ raise AnsibleFilterError(f"Unable to process file: {e}")
return data_list
diff --git a/plugins/filter/from_ini.py b/plugins/filter/from_ini.py
new file mode 100644
index 0000000000..07b16d4ac2
--- /dev/null
+++ b/plugins/filter/from_ini.py
@@ -0,0 +1,95 @@
+
+# Copyright (c) 2023, Steffen Scheib
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: from_ini
+short_description: Converts INI text input into a dictionary
+version_added: 8.2.0
+author: Steffen Scheib (@sscheib)
+description:
+ - Converts INI text input into a dictionary.
+options:
+ _input:
+ description: A string containing an INI document.
+ type: string
+ required: true
+"""
+
+EXAMPLES = r"""
+- name: Slurp an INI file
+ ansible.builtin.slurp:
+ src: /etc/rhsm/rhsm.conf
+ register: rhsm_conf
+
+- name: Display the INI file as dictionary
+ ansible.builtin.debug:
+ var: rhsm_conf.content | b64decode | community.general.from_ini
+
+- name: Set a new dictionary fact with the contents of the INI file
+ ansible.builtin.set_fact:
+ rhsm_dict: >-
+ {{
+ rhsm_conf.content | b64decode | community.general.from_ini
+ }}
+"""
+
+RETURN = r"""
+_value:
+ description: A dictionary representing the INI file.
+ type: dictionary
+"""
+
+
+from io import StringIO
+from configparser import ConfigParser
+
+from ansible.errors import AnsibleFilterError
+
+
+class IniParser(ConfigParser):
+ ''' Implements a configparser which is able to return a dict '''
+
+ def __init__(self):
+ super().__init__(interpolation=None)
+ self.optionxform = str
+
+ def as_dict(self):
+ d = dict(self._sections)
+ for k in d:
+ d[k] = dict(self._defaults, **d[k])
+ d[k].pop('__name__', None)
+
+ if self._defaults:
+ d['DEFAULT'] = dict(self._defaults)
+
+ return d
+
+
+def from_ini(obj):
+ ''' Read the given string as INI file and return a dict '''
+
+ if not isinstance(obj, str):
+ raise AnsibleFilterError(f'from_ini requires a str, got {type(obj)}')
+
+ parser = IniParser()
+
+ try:
+ parser.read_file(StringIO(obj))
+ except Exception as ex:
+ raise AnsibleFilterError(f'from_ini failed to parse given string: {ex}', orig_exc=ex)
+
+ return parser.as_dict()
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+
+ return {
+ 'from_ini': from_ini
+ }
diff --git a/plugins/filter/groupby_as_dict.py b/plugins/filter/groupby_as_dict.py
index 4a8f4c6dc1..766d365575 100644
--- a/plugins/filter/groupby_as_dict.py
+++ b/plugins/filter/groupby_as_dict.py
@@ -1,32 +1,32 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Felix Fontein
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: groupby_as_dict
- short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute
- version_added: 3.1.0
- author: Felix Fontein (@felixfontein)
- description:
- - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute.
- positional: attribute
- options:
- _input:
- description: A list of dictionaries
- type: list
- elements: dictionary
- required: true
- attribute:
- description: The attribute to use as the key.
- type: str
- required: true
-'''
+DOCUMENTATION = r"""
+name: groupby_as_dict
+short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute
+version_added: 3.1.0
+author: Felix Fontein (@felixfontein)
+description:
+ - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute.
+ - This filter is similar to the Jinja2 C(groupby) filter. Use the Jinja2 C(groupby) filter if you have multiple entries
+ with the same value, or when you need a dictionary with list values, or when you need to use deeply nested attributes.
+positional: attribute
+options:
+ _input:
+ description: A list of dictionaries.
+ type: list
+ elements: dictionary
+ required: true
+ attribute:
+ description: The attribute to use as the key.
+ type: str
+ required: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Arrange a list of dictionaries as a dictionary of dictionaries
ansible.builtin.debug:
msg: "{{ sequence | community.general.groupby_as_dict('key') }}"
@@ -44,16 +44,16 @@ EXAMPLES = '''
# other_value:
# key: other_value
# baz: bar
-'''
+"""
-RETURN = '''
- _value:
- description: A dictionary containing the dictionaries from the list as values.
- type: dictionary
-'''
+RETURN = r"""
+_value:
+ description: A dictionary containing the dictionaries from the list as values.
+ type: dictionary
+"""
from ansible.errors import AnsibleFilterError
-from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from collections.abc import Mapping, Sequence
def groupby_as_dict(sequence, attribute):
@@ -70,12 +70,12 @@ def groupby_as_dict(sequence, attribute):
result = dict()
for list_index, element in enumerate(sequence):
if not isinstance(element, Mapping):
- raise AnsibleFilterError('Sequence element #{0} is not a mapping'.format(list_index))
+ raise AnsibleFilterError(f'Sequence element #{list_index} is not a mapping')
if attribute not in element:
- raise AnsibleFilterError('Attribute not contained in element #{0} of sequence'.format(list_index))
+ raise AnsibleFilterError(f'Attribute not contained in element #{list_index} of sequence')
result_index = element[attribute]
if result_index in result:
- raise AnsibleFilterError('Multiple sequence entries have attribute value {0!r}'.format(result_index))
+ raise AnsibleFilterError(f'Multiple sequence entries have attribute value {result_index!r}')
result[result_index] = element
return result
diff --git a/plugins/filter/hashids.py b/plugins/filter/hashids.py
index 45fba83c03..c58ae4d70b 100644
--- a/plugins/filter/hashids.py
+++ b/plugins/filter/hashids.py
@@ -1,21 +1,23 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Andrew Pantuso (@ajpantuso)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
from ansible.errors import (
AnsibleError,
AnsibleFilterError,
- AnsibleFilterTypeError,
)
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.collections import is_sequence
+try:
+ from ansible.errors import AnsibleTypeError
+except ImportError:
+ from ansible.errors import AnsibleFilterTypeError as AnsibleTypeError
+
try:
from hashids import Hashids
HAS_HASHIDS = True
@@ -27,7 +29,7 @@ def initialize_hashids(**kwargs):
if not HAS_HASHIDS:
raise AnsibleError("The hashids library must be installed in order to use this plugin")
- params = dict((k, v) for k, v in kwargs.items() if v)
+ params = {k: v for k, v in kwargs.items() if v}
try:
return Hashids(**params)
@@ -64,9 +66,7 @@ def hashids_encode(nums, salt=None, alphabet=None, min_length=None):
try:
hashid = hashids.encode(*nums)
except TypeError as e:
- raise AnsibleFilterTypeError(
- "Data to encode must by a tuple or list of ints: %s" % to_native(e)
- )
+ raise AnsibleTypeError(f"Data to encode must be a tuple or list of ints: {e}")
return hashid
diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py
index 8f83871407..92996e812c 100644
--- a/plugins/filter/jc.py
+++ b/plugins/filter/jc.py
@@ -1,47 +1,52 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2015, Filipe Niero Felisbino
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
#
# contributed by Kelly Brazil
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: jc
- short_description: Convert output of many shell commands and file-types to JSON
- version_added: 1.1.0
- author: Kelly Brazil (@kellyjonbrazil)
- description:
- - Convert output of many shell commands and file-types to JSON.
- - Uses the L(jc library,https://github.com/kellyjonbrazil/jc).
- positional: parser
- options:
- _input:
- description: The data to convert.
- type: string
- required: true
- parser:
- description:
- - The correct parser for the input data.
- - For example C(ifconfig).
- - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers.
- type: string
- required: true
- quiet:
- description: Set to C(false) to not suppress warnings.
- type: boolean
- default: true
- raw:
- description: Set to C(true) to return pre-processed JSON.
- type: boolean
- default: false
- requirements:
- - jc (https://github.com/kellyjonbrazil/jc)
-'''
+DOCUMENTATION = r"""
+name: jc
+short_description: Convert output of many shell commands and file-types to JSON
+version_added: 1.1.0
+author: Kelly Brazil (@kellyjonbrazil)
+description:
+ - Convert output of many shell commands and file-types to JSON.
+ - Uses the L(jc library,https://github.com/kellyjonbrazil/jc).
+positional: parser
+options:
+ _input:
+ description: The data to convert.
+ type: string
+ required: true
+ parser:
+ description:
+ - The correct parser for the input data.
+ - For example V(ifconfig).
+ - 'Note: use underscores instead of dashes (if any) in the parser module name.'
+ - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers.
+ type: string
+ required: true
+ quiet:
+ description: Set to V(false) to not suppress warnings.
+ type: boolean
+ default: true
+ raw:
+ description: Set to V(true) to return pre-processed JSON.
+ type: boolean
+ default: false
+requirements:
+ - jc installed as a Python library (U(https://pypi.org/project/jc/))
+"""
+
+EXAMPLES = r"""
+- name: Install the prereqs of the jc filter (jc Python package) on the Ansible controller
+ delegate_to: localhost
+ ansible.builtin.pip:
+ name: jc
+ state: present
-EXAMPLES = '''
- name: Run command
ansible.builtin.command: uname -a
register: result
@@ -61,13 +66,13 @@ EXAMPLES = '''
# "operating_system": "GNU/Linux",
# "processor": "x86_64"
# }
-'''
+"""
-RETURN = '''
- _value:
- description: The processed output.
- type: any
-'''
+RETURN = r"""
+_value:
+ description: The processed output.
+ type: any
+"""
from ansible.errors import AnsibleError, AnsibleFilterError
import importlib
@@ -79,7 +84,7 @@ except ImportError:
HAS_LIB = False
-def jc(data, parser, quiet=True, raw=False):
+def jc_filter(data, parser, quiet=True, raw=False):
"""Convert returned command output to JSON using the JC library
Arguments:
@@ -94,15 +99,19 @@ def jc(data, parser, quiet=True, raw=False):
dictionary or list of dictionaries
Example:
-
- name: run date command
hosts: ubuntu
tasks:
- - shell: date
+ - name: install the prereqs of the jc filter (jc Python package) on the Ansible controller
+ delegate_to: localhost
+ ansible.builtin.pip:
+ name: jc
+ state: present
+ - ansible.builtin.shell: date
register: result
- - set_fact:
+ - ansible.builtin.set_fact:
myvar: "{{ result.stdout | community.general.jc('date') }}"
- - debug:
+ - ansible.builtin.debug:
msg: "{{ myvar }}"
produces:
@@ -124,14 +133,20 @@ def jc(data, parser, quiet=True, raw=False):
"""
if not HAS_LIB:
- raise AnsibleError('You need to install "jc" prior to running jc filter')
+ raise AnsibleError('You need to install "jc" as a Python library on the Ansible controller prior to running jc filter')
try:
- jc_parser = importlib.import_module('jc.parsers.' + parser)
- return jc_parser.parse(data, quiet=quiet, raw=raw)
+ # new API (jc v1.18.0 and higher) allows use of plugin parsers
+ if hasattr(jc, 'parse'):
+ return jc.parse(parser, data, quiet=quiet, raw=raw)
+
+ # old API (jc v1.17.7 and lower)
+ else:
+ jc_parser = importlib.import_module(f'jc.parsers.{parser}')
+ return jc_parser.parse(data, quiet=quiet, raw=raw)
except Exception as e:
- raise AnsibleFilterError('Error in jc filter plugin: %s' % e)
+ raise AnsibleFilterError(f'Error in jc filter plugin: {e}')
class FilterModule(object):
@@ -139,5 +154,5 @@ class FilterModule(object):
def filters(self):
return {
- 'jc': jc
+ 'jc': jc_filter,
}
diff --git a/plugins/filter/json_diff.yml b/plugins/filter/json_diff.yml
new file mode 100644
index 0000000000..a370564d7a
--- /dev/null
+++ b/plugins/filter/json_diff.yml
@@ -0,0 +1,56 @@
+---
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: json_diff
+ short_description: Create a JSON patch by comparing two JSON files
+ description:
+ - This filter compares the input with the argument and computes a list of operations
+ that can be consumed by the P(community.general.json_patch_recipe#filter) to change the input
+ to the argument.
+ requirements:
+ - jsonpatch
+ version_added: 10.3.0
+ author:
+ - Stanislav Meduna (@numo68)
+ positional: target
+ options:
+ _input:
+ description: A list or a dictionary representing a source JSON object, or a string containing a JSON object.
+ type: raw
+ required: true
+ target:
+ description: A list or a dictionary representing a target JSON object, or a string containing a JSON object.
+ type: raw
+ required: true
+ seealso:
+ - name: RFC 6902
+ description: JavaScript Object Notation (JSON) Patch
+ link: https://datatracker.ietf.org/doc/html/rfc6902
+ - name: RFC 6901
+ description: JavaScript Object Notation (JSON) Pointer
+ link: https://datatracker.ietf.org/doc/html/rfc6901
+ - name: jsonpatch Python Package
+ description: A Python library for applying JSON patches
+ link: https://pypi.org/project/jsonpatch/
+
+RETURN:
+ _value:
+ description: A list of JSON patch operations to apply.
+ type: list
+ elements: dict
+
+EXAMPLES: |
+ - name: Compute a difference
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_diff(target) }}"
+ vars:
+ input: {"foo": 1, "bar":{"baz": 2}, "baw": [1, 2, 3], "hello": "day"}
+ target: {"foo": 1, "bar": {"baz": 2}, "baw": [1, 3], "baq": {"baz": 2}, "hello": "night"}
+ # => [
+ # {"op": "add", "path": "/baq", "value": {"baz": 2}},
+ # {"op": "remove", "path": "/baw/1"},
+ # {"op": "replace", "path": "/hello", "value": "night"}
+ # ]
diff --git a/plugins/filter/json_patch.py b/plugins/filter/json_patch.py
new file mode 100644
index 0000000000..8cd6bd08b0
--- /dev/null
+++ b/plugins/filter/json_patch.py
@@ -0,0 +1,193 @@
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+from json import loads
+from typing import TYPE_CHECKING
+from ansible.errors import AnsibleFilterError
+
+
+if TYPE_CHECKING:
+ from typing import Any, Callable, Union
+
+try:
+ import jsonpatch
+
+except ImportError as exc:
+ HAS_LIB = False
+ JSONPATCH_IMPORT_ERROR = exc
+else:
+ HAS_LIB = True
+ JSONPATCH_IMPORT_ERROR = None
+
+OPERATIONS_AVAILABLE = ["add", "copy", "move", "remove", "replace", "test"]
+OPERATIONS_NEEDING_FROM = ["copy", "move"]
+OPERATIONS_NEEDING_VALUE = ["add", "replace", "test"]
+
+
+class FilterModule:
+ """Filter plugin."""
+
+ def check_json_object(self, filter_name: str, object_name: str, inp: Any):
+ if isinstance(inp, (str, bytes, bytearray)):
+ try:
+ return loads(inp)
+ except Exception as e:
+ raise AnsibleFilterError(
+ f"{filter_name}: could not decode JSON from {object_name}: {e}"
+ ) from e
+
+ if not isinstance(inp, (list, dict)):
+ raise AnsibleFilterError(
+ f"{filter_name}: {object_name} is not dictionary, list or string"
+ )
+
+ return inp
+
+ def check_patch_arguments(self, filter_name: str, args: dict):
+
+ if "op" not in args or not isinstance(args["op"], str):
+ raise AnsibleFilterError(f"{filter_name}: 'op' argument is not a string")
+
+ if args["op"] not in OPERATIONS_AVAILABLE:
+ raise AnsibleFilterError(
+ f"{filter_name}: unsupported 'op' argument: {args['op']}"
+ )
+
+ if "path" not in args or not isinstance(args["path"], str):
+ raise AnsibleFilterError(f"{filter_name}: 'path' argument is not a string")
+
+ if args["op"] in OPERATIONS_NEEDING_FROM:
+ if "from" not in args:
+ raise AnsibleFilterError(
+ f"{filter_name}: 'from' argument missing for '{args['op']}' operation"
+ )
+ if not isinstance(args["from"], str):
+ raise AnsibleFilterError(
+ f"{filter_name}: 'from' argument is not a string"
+ )
+
+ def json_patch(
+ self,
+ inp: Union[str, list, dict, bytes, bytearray],
+ op: str,
+ path: str,
+ value: Any = None,
+ **kwargs: dict,
+ ) -> Any:
+
+ if not HAS_LIB:
+ raise AnsibleFilterError(
+ "You need to install 'jsonpatch' package prior to running 'json_patch' filter"
+ ) from JSONPATCH_IMPORT_ERROR
+
+ args = {"op": op, "path": path}
+ from_arg = kwargs.pop("from", None)
+ fail_test = kwargs.pop("fail_test", False)
+
+ if kwargs:
+ raise AnsibleFilterError(
+ f"json_patch: unexpected keywords arguments: {', '.join(sorted(kwargs))}"
+ )
+
+ if not isinstance(fail_test, bool):
+ raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool")
+
+ if op in OPERATIONS_NEEDING_VALUE:
+ args["value"] = value
+ if op in OPERATIONS_NEEDING_FROM and from_arg is not None:
+ args["from"] = from_arg
+
+ inp = self.check_json_object("json_patch", "input", inp)
+ self.check_patch_arguments("json_patch", args)
+
+ result = None
+
+ try:
+ result = jsonpatch.apply_patch(inp, [args])
+ except jsonpatch.JsonPatchTestFailed as e:
+ if fail_test:
+ raise AnsibleFilterError(
+ f"json_patch: test operation failed: {e}"
+ ) from e
+ else:
+ pass
+ except Exception as e:
+ raise AnsibleFilterError(f"json_patch: patch failed: {e}") from e
+
+ return result
+
+ def json_patch_recipe(
+ self,
+ inp: Union[str, list, dict, bytes, bytearray],
+ operations: list,
+ /,
+ fail_test: bool = False,
+ ) -> Any:
+
+ if not HAS_LIB:
+ raise AnsibleFilterError(
+ "You need to install 'jsonpatch' package prior to running 'json_patch_recipe' filter"
+ ) from JSONPATCH_IMPORT_ERROR
+
+ if not isinstance(operations, list):
+ raise AnsibleFilterError(
+ "json_patch_recipe: 'operations' needs to be a list"
+ )
+
+ if not isinstance(fail_test, bool):
+ raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool")
+
+ result = None
+
+ inp = self.check_json_object("json_patch_recipe", "input", inp)
+ for args in operations:
+ self.check_patch_arguments("json_patch_recipe", args)
+
+ try:
+ result = jsonpatch.apply_patch(inp, operations)
+ except jsonpatch.JsonPatchTestFailed as e:
+ if fail_test:
+ raise AnsibleFilterError(
+ f"json_patch_recipe: test operation failed: {e}"
+ ) from e
+ else:
+ pass
+ except Exception as e:
+ raise AnsibleFilterError(f"json_patch_recipe: patch failed: {e}") from e
+
+ return result
+
+ def json_diff(
+ self,
+ inp: Union[str, list, dict, bytes, bytearray],
+ target: Union[str, list, dict, bytes, bytearray],
+ ) -> list:
+
+ if not HAS_LIB:
+ raise AnsibleFilterError(
+ "You need to install 'jsonpatch' package prior to running 'json_diff' filter"
+ ) from JSONPATCH_IMPORT_ERROR
+
+ inp = self.check_json_object("json_diff", "input", inp)
+ target = self.check_json_object("json_diff", "target", target)
+
+ try:
+ result = list(jsonpatch.make_patch(inp, target))
+ except Exception as e:
+ raise AnsibleFilterError(f"JSON diff failed: {e}") from e
+
+ return result
+
+ def filters(self) -> dict[str, Callable[..., Any]]:
+ """Map filter plugin names to their functions.
+
+ Returns:
+ dict: The filter plugin functions.
+ """
+ return {
+ "json_patch": self.json_patch,
+ "json_patch_recipe": self.json_patch_recipe,
+ "json_diff": self.json_diff,
+ }
diff --git a/plugins/filter/json_patch.yml b/plugins/filter/json_patch.yml
new file mode 100644
index 0000000000..42a0309202
--- /dev/null
+++ b/plugins/filter/json_patch.yml
@@ -0,0 +1,145 @@
+---
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: json_patch
+ short_description: Apply a JSON-Patch (RFC 6902) operation to an object
+ description:
+ - This filter applies a single JSON patch operation and returns a modified object.
+ - If the operation is a test, the filter returns an unmodified object if the test
+ succeeded and a V(none) value otherwise.
+ requirements:
+ - jsonpatch
+ version_added: 10.3.0
+ author:
+ - Stanislav Meduna (@numo68)
+ positional: op, path, value
+ options:
+ _input:
+ description: A list or a dictionary representing a JSON object, or a string containing a JSON object.
+ type: raw
+ required: true
+ op:
+ description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)).
+ type: str
+ choices: [add, copy, move, remove, replace, test]
+ required: true
+ path:
+ description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)).
+ type: str
+ required: true
+ value:
+ description: Value to use in the operation. Ignored for O(op=copy), O(op=move), and O(op=remove).
+ type: raw
+ from:
+ description: The source location for the copy and move operation. Mandatory
+ for O(op=copy) and O(op=move), ignored otherwise.
+ type: str
+ fail_test:
+ description: If V(false), a failed O(op=test) will return V(none). If V(true), the filter
+ invocation will fail with an error.
+ type: bool
+ default: false
+ seealso:
+ - name: RFC 6902
+ description: JavaScript Object Notation (JSON) Patch
+ link: https://datatracker.ietf.org/doc/html/rfc6902
+ - name: RFC 6901
+ description: JavaScript Object Notation (JSON) Pointer
+ link: https://datatracker.ietf.org/doc/html/rfc6901
+ - name: jsonpatch Python Package
+ description: A Python library for applying JSON patches
+ link: https://pypi.org/project/jsonpatch/
+
+RETURN:
+ _value:
+ description: A modified object or V(none) if O(op=test), O(fail_test=false) and the test failed.
+ type: any
+ returned: always
+
+EXAMPLES: |
+ - name: Insert a new element into an array at a specified index
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/1', {'baz': 'qux'}) }}"
+ vars:
+ input: ["foo": { "one": 1 }, "bar": { "two": 2 }]
+ # => [{"foo": {"one": 1}}, {"baz": "qux"}, {"bar": {"two": 2}}]
+
+ - name: Insert a new key into a dictionary
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/bar/baz', 'qux') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1}, "bar": {"baz": "qux", "two": 2}}
+
+ - name: Input is a string
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/baz', 3) }}"
+ vars:
+ input: '{ "foo": { "one": 1 }, "bar": { "two": 2 } }'
+ # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": 3}
+
+ - name: Existing key is replaced
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/bar', 'qux') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1}, "bar": "qux"}
+
+ - name: Escaping tilde as ~0 and slash as ~1 in the path
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/~0~1', 'qux') }}"
+ vars:
+ input: {}
+ # => {"~/": "qux"}
+
+ - name: Add at the end of the array
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('add', '/-', 4) }}"
+ vars:
+ input: [1, 2, 3]
+ # => [1, 2, 3, 4]
+
+ - name: Remove a key
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('remove', '/bar') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1} }
+
+ - name: Replace a value
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('replace', '/bar', 2) }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1}, "bar": 2}
+
+ - name: Copy a value
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('copy', '/baz', from='/bar') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": { "two": 2 }}
+
+ - name: Move a value
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('move', '/baz', from='/bar') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => {"foo": {"one": 1}, "baz": { "two": 2 }}
+
+ - name: Successful test
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('test', '/bar/two', 2) | ternary('OK', 'Failed') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => OK
+
+ - name: Unsuccessful test
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch('test', '/bar/two', 9) | ternary('OK', 'Failed') }}"
+ vars:
+ input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
+ # => Failed
diff --git a/plugins/filter/json_patch_recipe.yml b/plugins/filter/json_patch_recipe.yml
new file mode 100644
index 0000000000..671600b941
--- /dev/null
+++ b/plugins/filter/json_patch_recipe.yml
@@ -0,0 +1,102 @@
+---
+# Copyright (c) Stanislav Meduna (@numo68)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: json_patch_recipe
+ short_description: Apply JSON-Patch (RFC 6902) operations to an object
+ description:
+ - This filter sequentially applies JSON patch operations and returns a modified object.
+ - If there is a test operation in the list, the filter continues if the test
+ succeeded and returns a V(none) value otherwise.
+ requirements:
+ - jsonpatch
+ version_added: 10.3.0
+ author:
+ - Stanislav Meduna (@numo68)
+ positional: operations, fail_test
+ options:
+ _input:
+ description: A list or a dictionary representing a JSON object, or a string containing a JSON object.
+ type: raw
+ required: true
+ operations:
+ description: A list of JSON patch operations to apply.
+ type: list
+ elements: dict
+ required: true
+ suboptions:
+ op:
+ description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)).
+ type: str
+ choices: [add, copy, move, remove, replace, test]
+ required: true
+ path:
+ description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)).
+ type: str
+ required: true
+ value:
+ description: Value to use in the operation. Ignored for O(operations[].op=copy), O(operations[].op=move), and O(operations[].op=remove).
+ type: raw
+ from:
+ description: The source location for the copy and move operation. Mandatory
+ for O(operations[].op=copy) and O(operations[].op=move), ignored otherwise.
+ type: str
+ fail_test:
+ description: If V(false), a failed O(operations[].op=test) will return V(none). If V(true), the filter
+ invocation will fail with an error.
+ type: bool
+ default: false
+ seealso:
+ - name: RFC 6902
+ description: JavaScript Object Notation (JSON) Patch
+ link: https://datatracker.ietf.org/doc/html/rfc6902
+ - name: RFC 6901
+ description: JavaScript Object Notation (JSON) Pointer
+ link: https://datatracker.ietf.org/doc/html/rfc6901
+ - name: jsonpatch Python Package
+ description: A Python library for applying JSON patches
+ link: https://pypi.org/project/jsonpatch/
+
+RETURN:
+ _value:
+ description: A modified object or V(none) if O(operations[].op=test), O(fail_test=false)
+ and the test failed.
+ type: any
+ returned: always
+
+EXAMPLES: |
+ - name: Apply a series of operations
+ ansible.builtin.debug:
+ msg: "{{ input | community.general.json_patch_recipe(operations) }}"
+ vars:
+ input: {}
+ operations:
+ - op: 'add'
+ path: '/foo'
+ value: 1
+ - op: 'add'
+ path: '/bar'
+ value: []
+ - op: 'add'
+ path: '/bar/-'
+ value: 2
+ - op: 'add'
+ path: '/bar/0'
+ value: 1
+ - op: 'remove'
+ path: '/bar/0'
+ - op: 'move'
+ from: '/foo'
+ path: '/baz'
+ - op: 'copy'
+ from: '/baz'
+ path: '/bax'
+ - op: 'copy'
+ from: '/baz'
+ path: '/bay'
+ - op: 'replace'
+ path: '/baz'
+ value: [10, 20, 30]
+ # => {"bar":[2],"bax":1,"bay":1,"baz":[10,20,30]}
diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py
index 9e8fa4ef2e..e040a4aca2 100644
--- a/plugins/filter/json_query.py
+++ b/plugins/filter/json_query.py
@@ -1,34 +1,32 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2015, Filipe Niero Felisbino
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: json_query
- short_description: Select a single element or a data subset from a complex data structure
- description:
- - This filter lets you query a complex JSON structure and iterate over it using a loop structure.
- positional: expr
- options:
- _input:
- description:
- - The JSON data to query.
- type: any
- required: true
- expr:
- description:
- - The query expression.
- - See U(http://jmespath.org/examples.html) for examples.
- type: string
- required: true
- requirements:
- - jmespath
-'''
+DOCUMENTATION = r"""
+name: json_query
+short_description: Select a single element or a data subset from a complex data structure
+description:
+ - This filter lets you query a complex JSON structure and iterate over it using a loop structure.
+positional: expr
+options:
+ _input:
+ description:
+ - The JSON data to query.
+ type: any
+ required: true
+ expr:
+ description:
+ - The query expression.
+ - See U(http://jmespath.org/examples.html) for examples.
+ type: string
+ required: true
+requirements:
+ - jmespath
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Define data to work on in the examples below
ansible.builtin.set_fact:
domain_definition:
@@ -99,13 +97,13 @@ EXAMPLES = '''
msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
vars:
server_name_query: "domain.server[?contains(name,'server1')].port"
-'''
+"""
-RETURN = '''
- _value:
- description: The result of the query.
- type: any
-'''
+RETURN = r"""
+_value:
+ description: The result of the query.
+ type: any
+"""
from ansible.errors import AnsibleError, AnsibleFilterError
@@ -125,17 +123,24 @@ def json_query(data, expr):
'json_query filter')
# Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence
- # See issue: https://github.com/ansible-collections/community.general/issues/320
- jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
- jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', )
- jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', )
+ # See issues https://github.com/ansible-collections/community.general/issues/320
+ # and https://github.com/ansible/ansible/issues/85600.
+ jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + (
+ 'AnsibleUnicode', 'AnsibleUnsafeText', '_AnsibleTaggedStr',
+ )
+ jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + (
+ 'AnsibleSequence', '_AnsibleLazyTemplateList',
+ )
+ jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + (
+ 'AnsibleMapping', '_AnsibleLazyTemplateDict',
+ )
try:
return jmespath.search(expr, data)
except jmespath.exceptions.JMESPathError as e:
- raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
+ raise AnsibleFilterError(f'JMESPathError in json_query filter plugin:\n{e}')
except Exception as e:
# For older jmespath, we can get ValueError and TypeError without much info.
- raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
+ raise AnsibleFilterError(f'Error in jmespath.search in json_query filter plugin:\n{e}')
class FilterModule(object):
diff --git a/plugins/filter/keep_keys.py b/plugins/filter/keep_keys.py
new file mode 100644
index 0000000000..18876789d6
--- /dev/null
+++ b/plugins/filter/keep_keys.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2024 Vladimir Botka
+# Copyright (c) 2024 Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: keep_keys
+short_description: Keep specific keys from dictionaries in a list
+version_added: "9.1.0"
+author:
+ - Vladimir Botka (@vbotka)
+ - Felix Fontein (@felixfontein)
+description: This filter keeps only specified keys from a provided list of dictionaries.
+options:
+ _input:
+ description:
+ - A list of dictionaries.
+ - Top level keys must be strings.
+ type: list
+ elements: dictionary
+ required: true
+ target:
+ description:
+ - A single key or key pattern to keep, or a list of keys or keys patterns to keep.
+ - If O(matching_parameter=regex) there must be exactly one pattern provided.
+ type: raw
+ required: true
+ matching_parameter:
+ description: Specify the matching option of target keys.
+ type: str
+ default: equal
+ choices:
+ equal: Matches keys of exactly one of the O(target) items.
+ starts_with: Matches keys that start with one of the O(target) items.
+ ends_with: Matches keys that end with one of the O(target) items.
+ regex:
+ - Matches keys that match the regular expression provided in O(target).
+ - In this case, O(target) must be a regex string or a list with single regex string.
+"""
+
+EXAMPLES = r"""
+- l:
+ - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
+ - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
+
+ # 1) By default match keys that equal any of the items in the target.
+- t: [k0_x0, k1_x1]
+ r: "{{ l | community.general.keep_keys(target=t) }}"
+
+ # 2) Match keys that start with any of the items in the target.
+- t: [k0, k1]
+ r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}"
+
+ # 3) Match keys that end with any of the items in target.
+- t: [x0, x1]
+ r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}"
+
+ # 4) Match keys by the regex.
+- t: ['^.*[01]_x.*$']
+ r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}"
+
+ # 5) Match keys by the regex.
+- t: '^.*[01]_x.*$'
+ r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}"
+
+ # The results of above examples 1-5 are all the same.
+- r:
+ - {k0_x0: A0, k1_x1: B0}
+ - {k0_x0: A1, k1_x1: B1}
+
+ # 6) By default match keys that equal the target.
+- t: k0_x0
+ r: "{{ l | community.general.keep_keys(target=t) }}"
+
+ # 7) Match keys that start with the target.
+- t: k0
+ r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}"
+
+ # 8) Match keys that end with the target.
+- t: x0
+ r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}"
+
+ # 9) Match keys by the regex.
+- t: '^.*0_x.*$'
+ r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}"
+
+ # The results of above examples 6-9 are all the same.
+- r:
+ - {k0_x0: A0}
+ - {k0_x0: A1}
+"""
+
+RETURN = r"""
+_value:
+ description: The list of dictionaries with selected keys.
+ type: list
+ elements: dictionary
+"""
+
+from ansible_collections.community.general.plugins.plugin_utils.keys_filter import (
+ _keys_filter_params,
+ _keys_filter_target_str)
+
+
+def keep_keys(data, target=None, matching_parameter='equal'):
+ """keep specific keys from dictionaries in a list"""
+
+ # test parameters
+ _keys_filter_params(data, matching_parameter)
+ # test and transform target
+ tt = _keys_filter_target_str(target, matching_parameter)
+
+ if matching_parameter == 'equal':
+ def keep_key(key):
+ return key in tt
+ elif matching_parameter == 'starts_with':
+ def keep_key(key):
+ return key.startswith(tt)
+ elif matching_parameter == 'ends_with':
+ def keep_key(key):
+ return key.endswith(tt)
+ elif matching_parameter == 'regex':
+ def keep_key(key):
+ return tt.match(key) is not None
+
+ return [{k: v for k, v in d.items() if keep_key(k)} for d in data]
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'keep_keys': keep_keys,
+ }
diff --git a/plugins/filter/lists.py b/plugins/filter/lists.py
new file mode 100644
index 0000000000..0bae08f24c
--- /dev/null
+++ b/plugins/filter/lists.py
@@ -0,0 +1,200 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common.collections import is_sequence
+
+
+def remove_duplicates(lst):
+ seen = set()
+ seen_add = seen.add
+ result = []
+ for item in lst:
+ try:
+ if item not in seen:
+ seen_add(item)
+ result.append(item)
+ except TypeError:
+ # This happens for unhashable values `item`. If this happens,
+ # convert `seen` to a list and continue.
+ seen = list(seen)
+ seen_add = seen.append
+ if item not in seen:
+ seen_add(item)
+ result.append(item)
+ return result
+
+
+def flatten_list(lst):
+ result = []
+ for sublist in lst:
+ if not is_sequence(sublist):
+ msg = ("All arguments must be lists. %s is %s")
+ raise AnsibleFilterError(msg % (sublist, type(sublist)))
+ if len(sublist) > 0:
+ if all(is_sequence(sub) for sub in sublist):
+ for item in sublist:
+ result.append(item)
+ else:
+ result.append(sublist)
+ return result
+
+
+def lists_union(*args, **kwargs):
+ lists = args
+ flatten = kwargs.pop('flatten', False)
+
+ if kwargs:
+ # Some unused kwargs remain
+ raise AnsibleFilterError(
+ f"lists_union() got unexpected keywords arguments: {', '.join(kwargs.keys())}"
+ )
+
+ if flatten:
+ lists = flatten_list(args)
+
+ if not lists:
+ return []
+
+ if len(lists) == 1:
+ return lists[0]
+
+ a = lists[0]
+ for b in lists[1:]:
+ a = do_union(a, b)
+ return remove_duplicates(a)
+
+
+def do_union(a, b):
+ return a + b
+
+
+def lists_intersect(*args, **kwargs):
+ lists = args
+ flatten = kwargs.pop('flatten', False)
+
+ if kwargs:
+ # Some unused kwargs remain
+ raise AnsibleFilterError(
+ f"lists_intersect() got unexpected keywords arguments: {', '.join(kwargs.keys())}"
+ )
+
+ if flatten:
+ lists = flatten_list(args)
+
+ if not lists:
+ return []
+
+ if len(lists) == 1:
+ return lists[0]
+
+ a = remove_duplicates(lists[0])
+ for b in lists[1:]:
+ a = do_intersect(a, b)
+ return a
+
+
+def do_intersect(a, b):
+ isect = []
+ try:
+ other = set(b)
+ isect = [item for item in a if item in other]
+ except TypeError:
+ # This happens for unhashable values,
+ # use a list instead and redo.
+ other = list(b)
+ isect = [item for item in a if item in other]
+ return isect
+
+
+def lists_difference(*args, **kwargs):
+ lists = args
+ flatten = kwargs.pop('flatten', False)
+
+ if kwargs:
+ # Some unused kwargs remain
+ raise AnsibleFilterError(
+ f"lists_difference() got unexpected keywords arguments: {', '.join(kwargs.keys())}"
+ )
+
+ if flatten:
+ lists = flatten_list(args)
+
+ if not lists:
+ return []
+
+ if len(lists) == 1:
+ return lists[0]
+
+ a = remove_duplicates(lists[0])
+ for b in lists[1:]:
+ a = do_difference(a, b)
+ return a
+
+
+def do_difference(a, b):
+ diff = []
+ try:
+ other = set(b)
+ diff = [item for item in a if item not in other]
+ except TypeError:
+ # This happens for unhashable values,
+ # use a list instead and redo.
+ other = list(b)
+ diff = [item for item in a if item not in other]
+ return diff
+
+
+def lists_symmetric_difference(*args, **kwargs):
+ lists = args
+ flatten = kwargs.pop('flatten', False)
+
+ if kwargs:
+ # Some unused kwargs remain
+ raise AnsibleFilterError(
+ f"lists_symmetric_difference() got unexpected keywords arguments: {', '.join(kwargs.keys())}"
+ )
+
+ if flatten:
+ lists = flatten_list(args)
+
+ if not lists:
+ return []
+
+ if len(lists) == 1:
+ return lists[0]
+
+ a = lists[0]
+ for b in lists[1:]:
+ a = do_symmetric_difference(a, b)
+ return a
+
+
+def do_symmetric_difference(a, b):
+ sym_diff = []
+ union = lists_union(a, b)
+ try:
+ isect = set(a) & set(b)
+ sym_diff = [item for item in union if item not in isect]
+ except TypeError:
+ # This happens for unhashable values,
+ # build the intersection of `a` and `b` backed
+ # by a list instead of a set and redo.
+ isect = lists_intersect(a, b)
+ sym_diff = [item for item in union if item not in isect]
+ return sym_diff
+
+
+class FilterModule(object):
+ ''' Ansible lists jinja2 filters '''
+
+ def filters(self):
+ return {
+ 'lists_union': lists_union,
+ 'lists_intersect': lists_intersect,
+ 'lists_difference': lists_difference,
+ 'lists_symmetric_difference': lists_symmetric_difference,
+ }
diff --git a/plugins/filter/lists_difference.yml b/plugins/filter/lists_difference.yml
new file mode 100644
index 0000000000..630e77cf0a
--- /dev/null
+++ b/plugins/filter/lists_difference.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: lists_difference
+ short_description: Difference of lists with a predictive order
+ version_added: 8.4.0
+ description:
+ - Provide a unique list of all the elements from the first which do not appear in the other lists.
+ - The order of the items in the resulting list is preserved.
+ options:
+ _input:
+ description: A list.
+ type: list
+ elements: any
+ required: true
+ flatten:
+ description: Whether to remove one hierarchy level from the input list.
+ type: boolean
+ default: false
+ author:
+ - Christoph Fiehe (@cfiehe)
+
+EXAMPLES: |
+ - name: Return the difference of list1 and list2.
+ ansible.builtin.debug:
+ msg: "{{ list1 | community.general.lists_difference(list2) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ # => [10]
+
+ - name: Return the difference of list1, list2 and list3.
+ ansible.builtin.debug:
+ msg: "{{ [list1, list2, list3] | community.general.lists_difference(flatten=true) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ list3: [1, 2, 3, 4, 5, 10, 99, 101]
+ # => []
+
+RETURN:
+ _value:
+ description: A unique list of all the elements from the first list that do not appear in the other lists.
+ type: list
+ elements: any
diff --git a/plugins/filter/lists_intersect.yml b/plugins/filter/lists_intersect.yml
new file mode 100644
index 0000000000..d2ea9483b1
--- /dev/null
+++ b/plugins/filter/lists_intersect.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: lists_intersect
+ short_description: Intersection of lists with a predictive order
+ version_added: 8.4.0
+ description:
+ - Provide a unique list of all the common elements of two or more lists.
+ - The order of the items in the resulting list is preserved.
+ options:
+ _input:
+ description: A list.
+ type: list
+ elements: any
+ required: true
+ flatten:
+ description: Whether to remove one hierarchy level from the input list.
+ type: boolean
+ default: false
+ author:
+ - Christoph Fiehe (@cfiehe)
+
+EXAMPLES: |
+ - name: Return the intersection of list1 and list2.
+ ansible.builtin.debug:
+ msg: "{{ list1 | community.general.lists_intersect(list2) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ # => [1, 2, 5, 3, 4]
+
+ - name: Return the intersection of list1, list2 and list3.
+ ansible.builtin.debug:
+ msg: "{{ [list1, list2, list3] | community.general.lists_intersect(flatten=true) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ list3: [1, 2, 3, 4, 5, 10, 99, 101]
+ # => [1, 2, 5, 3, 4]
+
+RETURN:
+ _value:
+ description: A unique list of all the common elements from the provided lists.
+ type: list
+ elements: any
diff --git a/plugins/filter/lists_mergeby.py b/plugins/filter/lists_mergeby.py
index a89039ed89..4b8bf971f4 100644
--- a/plugins/filter/lists_mergeby.py
+++ b/plugins/filter/lists_mergeby.py
@@ -1,122 +1,216 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2020-2022, Vladimir Botka
+# Copyright (c) 2020-2024, Vladimir Botka
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: lists_mergeby
- short_description: Merge two or more lists of dictionaries by a given attribute
- version_added: 2.0.0
- author: Vladimir Botka (@vbotka)
- description:
- - Merge two or more lists by attribute I(index). Optional parameters 'recursive' and 'list_merge'
- control the merging of the lists in values. The function merge_hash from ansible.utils.vars
- is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see
- Ansible User's Guide chapter "Using filters to manipulate data" section "Combining
- hashes/dictionaries".
- positional: another_list, index
- options:
- _input:
- description: A list of dictionaries.
- type: list
- elements: dictionary
- required: true
- another_list:
- description: Another list of dictionaries. This parameter can be specified multiple times.
- type: list
- elements: dictionary
- index:
- description:
- - The dictionary key that must be present in every dictionary in every list that is used to
- merge the lists.
- type: string
- required: true
- recursive:
- description:
- - Should the combine recursively merge nested dictionaries (hashes).
- - "B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg)."
- type: boolean
- default: false
- list_merge:
- description:
- - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists.
- type: string
- default: replace
- choices:
- - replace
- - keep
- - append
- - prepend
- - append_rp
- - prepend_rp
-'''
+DOCUMENTATION = r"""
+name: lists_mergeby
+short_description: Merge two or more lists of dictionaries by a given attribute
+version_added: 2.0.0
+author: Vladimir Botka (@vbotka)
+description:
+ - Merge two or more lists by attribute O(index). Optional parameters O(recursive) and O(list_merge) control the merging
+ of the nested dictionaries and lists.
+ - The function C(merge_hash) from C(ansible.utils.vars) is used.
+ - To learn details on how to use the parameters O(recursive) and O(list_merge) see Ansible User's Guide chapter "Using filters
+ to manipulate data" section R(Combining hashes/dictionaries, combine_filter) or the filter P(ansible.builtin.combine#filter).
+positional: another_list, index
+options:
+ _input:
+ description:
+ - A list of dictionaries, or a list of lists of dictionaries.
+ - The required type of the C(elements) is set to C(raw) because all elements of O(_input) can be either dictionaries
+ or lists.
+ type: list
+ elements: raw
+ required: true
+ another_list:
+ description:
+ - Another list of dictionaries, or a list of lists of dictionaries.
+ - This parameter can be specified multiple times.
+ type: list
+ elements: raw
+ index:
+ description:
+ - The dictionary key that must be present in every dictionary in every list that is used to merge the lists.
+ type: string
+ required: true
+ recursive:
+ description:
+ - Should the combine recursively merge nested dictionaries (hashes).
+ - B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg).
+ type: boolean
+ default: false
+ list_merge:
+ description:
+ - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists.
+ type: string
+ default: replace
+ choices:
+ - replace
+ - keep
+ - append
+ - prepend
+ - append_rp
+ - prepend_rp
+"""
-EXAMPLES = '''
-- name: Merge two lists
+EXAMPLES = r"""
+# Some results below are manually formatted for better readability. The
+# dictionaries' keys will be sorted alphabetically in real output.
+
+- name: Example 1. Merge two lists. The results r1 and r2 are the same.
ansible.builtin.debug:
- msg: >-
- {{ list1 | community.general.lists_mergeby(
- list2,
- 'index',
- recursive=True,
- list_merge='append'
- ) }}"
+ msg: |
+ r1: {{ r1 }}
+ r2: {{ r2 }}
vars:
list1:
- - index: a
- value: 123
- - index: b
- value: 42
+ - {index: a, value: 123}
+ - {index: b, value: 4}
list2:
- - index: a
- foo: bar
- - index: c
- foo: baz
- # Produces the following list of dictionaries:
- # {
- # "index": "a",
- # "foo": "bar",
- # "value": 123
- # },
- # {
- # "index": "b",
- # "value": 42
- # },
- # {
- # "index": "c",
- # "foo": "baz"
- # }
-'''
+ - {index: a, foo: bar}
+ - {index: c, foo: baz}
+ r1: "{{ list1 | community.general.lists_mergeby(list2, 'index') }}"
+ r2: "{{ [list1, list2] | community.general.lists_mergeby('index') }}"
-RETURN = '''
- _value:
- description: The merged list.
- type: list
- elements: dictionary
-'''
+# r1:
+# - {index: a, foo: bar, value: 123}
+# - {index: b, value: 4}
+# - {index: c, foo: baz}
+# r2:
+# - {index: a, foo: bar, value: 123}
+# - {index: b, value: 4}
+# - {index: c, foo: baz}
+
+- name: Example 2. Merge three lists
+ ansible.builtin.debug:
+ var: r
+ vars:
+ list1:
+ - {index: a, value: 123}
+ - {index: b, value: 4}
+ list2:
+ - {index: a, foo: bar}
+ - {index: c, foo: baz}
+ list3:
+ - {index: d, foo: qux}
+ r: "{{ [list1, list2, list3] | community.general.lists_mergeby('index') }}"
+
+# r:
+# - {index: a, foo: bar, value: 123}
+# - {index: b, value: 4}
+# - {index: c, foo: baz}
+# - {index: d, foo: qux}
+
+- name: Example 3. Merge single list. The result is the same as 2.
+ ansible.builtin.debug:
+ var: r
+ vars:
+ list1:
+ - {index: a, value: 123}
+ - {index: b, value: 4}
+ - {index: a, foo: bar}
+ - {index: c, foo: baz}
+ - {index: d, foo: qux}
+ r: "{{ [list1, []] | community.general.lists_mergeby('index') }}"
+
+# r:
+# - {index: a, foo: bar, value: 123}
+# - {index: b, value: 4}
+# - {index: c, foo: baz}
+# - {index: d, foo: qux}
+
+- name: Example 4. Merge two lists. By default, replace nested lists.
+ ansible.builtin.debug:
+ var: r
+ vars:
+ list1:
+ - {index: a, foo: [X1, X2]}
+ - {index: b, foo: [X1, X2]}
+ list2:
+ - {index: a, foo: [Y1, Y2]}
+ - {index: b, foo: [Y1, Y2]}
+ r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}"
+
+# r:
+# - {index: a, foo: [Y1, Y2]}
+# - {index: b, foo: [Y1, Y2]}
+
+- name: Example 5. Merge two lists. Append nested lists.
+ ansible.builtin.debug:
+ var: r
+ vars:
+ list1:
+ - {index: a, foo: [X1, X2]}
+ - {index: b, foo: [X1, X2]}
+ list2:
+ - {index: a, foo: [Y1, Y2]}
+ - {index: b, foo: [Y1, Y2]}
+ r: "{{ [list1, list2] | community.general.lists_mergeby('index', list_merge='append') }}"
+
+# r:
+# - {index: a, foo: [X1, X2, Y1, Y2]}
+# - {index: b, foo: [X1, X2, Y1, Y2]}
+
+- name: Example 6. Merge two lists. By default, do not merge nested dictionaries.
+ ansible.builtin.debug:
+ var: r
+ vars:
+ list1:
+ - {index: a, foo: {x: 1, y: 2}}
+ - {index: b, foo: [X1, X2]}
+ list2:
+ - {index: a, foo: {y: 3, z: 4}}
+ - {index: b, foo: [Y1, Y2]}
+ r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}"
+
+# r:
+# - {index: a, foo: {y: 3, z: 4}}
+# - {index: b, foo: [Y1, Y2]}
+
+- name: Example 7. Merge two lists. Merge nested dictionaries too.
+ ansible.builtin.debug:
+ var: r
+ vars:
+ list1:
+ - {index: a, foo: {x: 1, y: 2}}
+ - {index: b, foo: [X1, X2]}
+ list2:
+ - {index: a, foo: {y: 3, z: 4}}
+ - {index: b, foo: [Y1, Y2]}
+ r: "{{ [list1, list2] | community.general.lists_mergeby('index', recursive=true) }}"
+
+# r:
+# - {index: a, foo: {x: 1, y: 3, z: 4}}
+# - {index: b, foo: [Y1, Y2]}
+"""
+
+RETURN = r"""
+_value:
+ description: The merged list.
+ type: list
+ elements: dictionary
+"""
from ansible.errors import AnsibleFilterError
-from ansible.module_utils.six import string_types
-from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from collections.abc import Mapping, Sequence
from ansible.utils.vars import merge_hash
-from ansible.release import __version__ as ansible_version
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
from collections import defaultdict
from operator import itemgetter
def list_mergeby(x, y, index, recursive=False, list_merge='replace'):
- ''' Merge 2 lists by attribute 'index'. The function merge_hash from ansible.utils.vars is used.
- This function is used by the function lists_mergeby.
+ '''Merge 2 lists by attribute 'index'. The function 'merge_hash'
+ from ansible.utils.vars is used. This function is used by the
+ function lists_mergeby.
'''
d = defaultdict(dict)
- for l in (x, y):
- for elem in l:
+ for lst in (x, y):
+ for elem in lst:
if not isinstance(elem, Mapping):
msg = "Elements of list arguments for lists_mergeby must be dictionaries. %s is %s"
raise AnsibleFilterError(msg % (elem, type(elem)))
@@ -126,20 +220,9 @@ def list_mergeby(x, y, index, recursive=False, list_merge='replace'):
def lists_mergeby(*terms, **kwargs):
- ''' Merge 2 or more lists by attribute 'index'. Optional parameters 'recursive' and 'list_merge'
- control the merging of the lists in values. The function merge_hash from ansible.utils.vars
- is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see
- Ansible User's Guide chapter "Using filters to manipulate data" section "Combining
- hashes/dictionaries".
-
- Example:
- - debug:
- msg: "{{ list1|
- community.general.lists_mergeby(list2,
- 'index',
- recursive=True,
- list_merge='append')|
- list }}"
+ '''Merge 2 or more lists by attribute 'index'. To learn details
+ on how to use the parameters 'recursive' and 'list_merge' see
+ the filter ansible.builtin.combine.
'''
recursive = kwargs.pop('recursive', False)
@@ -157,7 +240,7 @@ def lists_mergeby(*terms, **kwargs):
"must be lists. %s is %s")
raise AnsibleFilterError(msg % (sublist, type(sublist)))
if len(sublist) > 0:
- if all(isinstance(l, Sequence) for l in sublist):
+ if all(isinstance(lst, Sequence) for lst in sublist):
for item in sublist:
flat_list.append(item)
else:
@@ -172,7 +255,7 @@ def lists_mergeby(*terms, **kwargs):
index = terms[-1]
- if not isinstance(index, string_types):
+ if not isinstance(index, str):
msg = ("First argument after the lists for community.general.lists_mergeby must be string. "
"%s is %s")
raise AnsibleFilterError(msg % (index, type(index)))
diff --git a/plugins/filter/lists_symmetric_difference.yml b/plugins/filter/lists_symmetric_difference.yml
new file mode 100644
index 0000000000..abd8caab8a
--- /dev/null
+++ b/plugins/filter/lists_symmetric_difference.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: lists_symmetric_difference
+ short_description: Symmetric difference of lists with a predictive order
+ version_added: 8.4.0
+ description:
+ - Provide a unique list containing the symmetric difference of two or more lists.
+ - The order of the items in the resulting list is preserved.
+ options:
+ _input:
+ description: A list.
+ type: list
+ elements: any
+ required: true
+ flatten:
+ description: Whether to remove one hierarchy level from the input list.
+ type: boolean
+ default: false
+ author:
+ - Christoph Fiehe (@cfiehe)
+
+EXAMPLES: |
+ - name: Return the symmetric difference of list1 and list2.
+ ansible.builtin.debug:
+ msg: "{{ list1 | community.general.lists_symmetric_difference(list2) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ # => [10, 11, 99]
+
+ - name: Return the symmetric difference of list1, list2 and list3.
+ ansible.builtin.debug:
+ msg: "{{ [list1, list2, list3] | community.general.lists_symmetric_difference(flatten=true) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ list3: [1, 2, 3, 4, 5, 10, 99, 101]
+ # => [11, 1, 2, 3, 4, 5, 101]
+
+RETURN:
+ _value:
+ description: A unique list containing the symmetric difference of two or more lists.
+ type: list
+ elements: any
diff --git a/plugins/filter/lists_union.yml b/plugins/filter/lists_union.yml
new file mode 100644
index 0000000000..8c1ffb4f87
--- /dev/null
+++ b/plugins/filter/lists_union.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: lists_union
+ short_description: Union of lists with a predictive order
+ version_added: 8.4.0
+ description:
+ - Provide a unique list of all the elements of two or more lists.
+ - The order of the items in the resulting list is preserved.
+ options:
+ _input:
+ description: A list.
+ type: list
+ elements: any
+ required: true
+ flatten:
+ description: Whether to remove one hierarchy level from the input list.
+ type: boolean
+ default: false
+ author:
+ - Christoph Fiehe (@cfiehe)
+
+EXAMPLES: |
+ - name: Return the union of list1, list2 and list3.
+ ansible.builtin.debug:
+ msg: "{{ list1 | community.general.lists_union(list2, list3) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ list3: [1, 2, 3, 4, 5, 10, 99, 101]
+ # => [1, 2, 5, 3, 4, 10, 11, 99, 101]
+
+ - name: Return the union of list1 and list2.
+ ansible.builtin.debug:
+ msg: "{{ [list1, list2] | community.general.lists_union(flatten=true) }}"
+ vars:
+ list1: [1, 2, 5, 3, 4, 10]
+ list2: [1, 2, 3, 4, 5, 11, 99]
+ # => [1, 2, 5, 3, 4, 10, 11, 99]
+
+RETURN:
+ _value:
+ description: A unique list of all the elements from the provided lists.
+ type: list
+ elements: any
diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py
index 662c62b07c..e5e6201f1c 100644
--- a/plugins/filter/random_mac.py
+++ b/plugins/filter/random_mac.py
@@ -1,31 +1,29 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: random_mac
- short_description: Generate a random MAC address
- description:
- - Generates random networking interfaces MAC addresses for a given prefix.
- options:
- _input:
- description: A string prefix to use as a basis for the random MAC generated.
- type: string
- required: true
- seed:
- description:
- - A randomization seed to initialize the process, used to get repeatable results.
- - If no seed is provided, a system random source such as C(/dev/urandom) is used.
- required: false
- type: string
-'''
+DOCUMENTATION = r"""
+name: random_mac
+short_description: Generate a random MAC address
+description:
+ - Generates random networking interfaces MAC addresses for a given prefix.
+options:
+ _input:
+ description: A string prefix to use as a basis for the random MAC generated.
+ type: string
+ required: true
+ seed:
+ description:
+ - A randomization seed to initialize the process, used to get repeatable results.
+ - If no seed is provided, a system random source such as C(/dev/urandom) is used.
+ required: false
+ type: string
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Random MAC given a prefix
ansible.builtin.debug:
msg: "{{ '52:54:00' | community.general.random_mac }}"
@@ -34,35 +32,32 @@ EXAMPLES = '''
- name: With a seed
ansible.builtin.debug:
msg: "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}"
-'''
+"""
-RETURN = '''
- _value:
- description: The generated MAC.
- type: string
-'''
+RETURN = r"""
+_value:
+ description: The generated MAC.
+ type: string
+"""
import re
from random import Random, SystemRandom
from ansible.errors import AnsibleFilterError
-from ansible.module_utils.six import string_types
def random_mac(value, seed=None):
''' takes string prefix, and return it completed with random bytes
to get a complete 6 bytes MAC address '''
- if not isinstance(value, string_types):
- raise AnsibleFilterError('Invalid value type (%s) for random_mac (%s)' %
- (type(value), value))
+ if not isinstance(value, str):
+ raise AnsibleFilterError(f'Invalid value type ({type(value)}) for random_mac ({value})')
value = value.lower()
mac_items = value.split(':')
if len(mac_items) > 5:
- raise AnsibleFilterError('Invalid value (%s) for random_mac: 5 colon(:) separated'
- ' items max' % value)
+ raise AnsibleFilterError(f'Invalid value ({value}) for random_mac: 5 colon(:) separated items max')
err = ""
for mac in mac_items:
@@ -70,11 +65,11 @@ def random_mac(value, seed=None):
err += ",empty item"
continue
if not re.match('[a-f0-9]{2}', mac):
- err += ",%s not hexa byte" % mac
+ err += f",{mac} not hexa byte"
err = err.strip(',')
if err:
- raise AnsibleFilterError('Invalid value (%s) for random_mac: %s' % (value, err))
+ raise AnsibleFilterError(f'Invalid value ({value}) for random_mac: {err}')
if seed is None:
r = SystemRandom()
@@ -84,7 +79,7 @@ def random_mac(value, seed=None):
v = r.randint(68719476736, 1099511627775)
# Select first n chars to complement input prefix
remain = 2 * (6 - len(mac_items))
- rnd = ('%x' % v)[:remain]
+ rnd = f'{v:x}'[:remain]
return value + re.sub(r'(..)', r':\1', rnd)
diff --git a/plugins/filter/remove_keys.py b/plugins/filter/remove_keys.py
new file mode 100644
index 0000000000..fc134b41d0
--- /dev/null
+++ b/plugins/filter/remove_keys.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2024 Vladimir Botka
+# Copyright (c) 2024 Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: remove_keys
+short_description: Remove specific keys from dictionaries in a list
+version_added: "9.1.0"
+author:
+ - Vladimir Botka (@vbotka)
+ - Felix Fontein (@felixfontein)
+description: This filter removes only specified keys from a provided list of dictionaries.
+options:
+ _input:
+ description:
+ - A list of dictionaries.
+ - Top level keys must be strings.
+ type: list
+ elements: dictionary
+ required: true
+ target:
+ description:
+ - A single key or key pattern to remove, or a list of keys or keys patterns to remove.
+ - If O(matching_parameter=regex) there must be exactly one pattern provided.
+ type: raw
+ required: true
+ matching_parameter:
+ description: Specify the matching option of target keys.
+ type: str
+ default: equal
+ choices:
+ equal: Matches keys of exactly one of the O(target) items.
+ starts_with: Matches keys that start with one of the O(target) items.
+ ends_with: Matches keys that end with one of the O(target) items.
+ regex:
+ - Matches keys that match the regular expression provided in O(target).
+ - In this case, O(target) must be a regex string or a list with a single regex string.
+"""
+
+EXAMPLES = r"""
+- l:
+ - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
+ - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
+
+ # 1) By default match keys that equal any of the items in the target.
+- t: [k0_x0, k1_x1]
+ r: "{{ l | community.general.remove_keys(target=t) }}"
+
+ # 2) Match keys that start with any of the items in the target.
+- t: [k0, k1]
+ r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}"
+
+ # 3) Match keys that end with any of the items in target.
+- t: [x0, x1]
+ r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}"
+
+ # 4) Match keys by the regex.
+- t: ['^.*[01]_x.*$']
+ r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}"
+
+ # 5) Match keys by the regex.
+- t: '^.*[01]_x.*$'
+ r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}"
+
+ # The results of above examples 1-5 are all the same.
+- r:
+ - {k2_x2: [C0], k3_x3: foo}
+ - {k2_x2: [C1], k3_x3: bar}
+
+ # 6) By default match keys that equal the target.
+- t: k0_x0
+ r: "{{ l | community.general.remove_keys(target=t) }}"
+
+ # 7) Match keys that start with the target.
+- t: k0
+ r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}"
+
+ # 8) Match keys that end with the target.
+- t: x0
+ r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}"
+
+ # 9) Match keys by the regex.
+- t: '^.*0_x.*$'
+ r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}"
+
+ # The results of above examples 6-9 are all the same.
+- r:
+ - {k1_x1: B0, k2_x2: [C0], k3_x3: foo}
+ - {k1_x1: B1, k2_x2: [C1], k3_x3: bar}
+"""
+
+RETURN = r"""
+_value:
+ description: The list of dictionaries with selected keys removed.
+ type: list
+ elements: dictionary
+"""
+
+from ansible_collections.community.general.plugins.plugin_utils.keys_filter import (
+ _keys_filter_params,
+ _keys_filter_target_str)
+
+
+def remove_keys(data, target=None, matching_parameter='equal'):
+ """remove specific keys from dictionaries in a list"""
+
+ # test parameters
+ _keys_filter_params(data, matching_parameter)
+ # test and transform target
+ tt = _keys_filter_target_str(target, matching_parameter)
+
+ if matching_parameter == 'equal':
+ def keep_key(key):
+ return key not in tt
+ elif matching_parameter == 'starts_with':
+ def keep_key(key):
+ return not key.startswith(tt)
+ elif matching_parameter == 'ends_with':
+ def keep_key(key):
+ return not key.endswith(tt)
+ elif matching_parameter == 'regex':
+ def keep_key(key):
+ return tt.match(key) is None
+
+ return [{k: v for k, v in d.items() if keep_key(k)} for d in data]
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'remove_keys': remove_keys,
+ }
diff --git a/plugins/filter/replace_keys.py b/plugins/filter/replace_keys.py
new file mode 100644
index 0000000000..5af0b22f62
--- /dev/null
+++ b/plugins/filter/replace_keys.py
@@ -0,0 +1,178 @@
+# Copyright (c) 2024 Vladimir Botka
+# Copyright (c) 2024 Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: replace_keys
+short_description: Replace specific keys in a list of dictionaries
+version_added: "9.1.0"
+author:
+ - Vladimir Botka (@vbotka)
+ - Felix Fontein (@felixfontein)
+description: This filter replaces specified keys in a provided list of dictionaries.
+options:
+ _input:
+ description:
+ - A list of dictionaries.
+ - Top level keys must be strings.
+ type: list
+ elements: dictionary
+ required: true
+ target:
+ description:
+ - A list of dictionaries with attributes C(before) and C(after).
+ - The value of O(target[].after) replaces key matching O(target[].before).
+ type: list
+ elements: dictionary
+ required: true
+ suboptions:
+ before:
+ description:
+ - A key or key pattern to change.
+ - The interpretation of O(target[].before) depends on O(matching_parameter).
+ - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) is used.
+ type: str
+ after:
+ description: A matching key change to.
+ type: str
+ matching_parameter:
+ description: Specify the matching option of target keys.
+ type: str
+ default: equal
+ choices:
+ equal: Matches keys of exactly one of the O(target[].before) items.
+ starts_with: Matches keys that start with one of the O(target[].before) items.
+ ends_with: Matches keys that end with one of the O(target[].before) items.
+ regex: Matches keys that match one of the regular expressions provided in O(target[].before).
+"""
+
+EXAMPLES = r"""
+- l:
+ - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
+ - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
+
+ # 1) By default, replace keys that are equal to any of the attributes before.
+- t:
+ - {before: k0_x0, after: a0}
+ - {before: k1_x1, after: a1}
+ r: "{{ l | community.general.replace_keys(target=t) }}"
+
+ # 2) Replace keys that start with any of the attributes before.
+- t:
+ - {before: k0, after: a0}
+ - {before: k1, after: a1}
+ r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}"
+
+ # 3) Replace keys that end with any of the attributes before.
+- t:
+ - {before: x0, after: a0}
+ - {before: x1, after: a1}
+ r: "{{ l | community.general.replace_keys(target=t, matching_parameter='ends_with') }}"
+
+ # 4) Replace keys that match any regex of the attributes before.
+- t:
+ - {before: "^.*0_x.*$", after: a0}
+ - {before: "^.*1_x.*$", after: a1}
+ r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}"
+
+ # The results of above examples 1-4 are all the same.
+- r:
+ - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo}
+ - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar}
+
+ # 5) If more keys match the same attribute before, the last one will be used.
+- t:
+ - {before: "^.*_x.*$", after: X}
+ r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}"
+
+ # gives
+
+- r:
+ - X: foo
+ - X: bar
+
+ # 6) If there are items with equal attribute before, the first one will be used.
+- t:
+ - {before: "^.*_x.*$", after: X}
+ - {before: "^.*_x.*$", after: Y}
+ r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}"
+
+ # gives
+
+- r:
+ - X: foo
+ - X: bar
+
+ # 7) If there are more matches for a key, the first one will be used.
+- l:
+ - {aaa1: A, bbb1: B, ccc1: C}
+ - {aaa2: D, bbb2: E, ccc2: F}
+- t:
+ - {before: a, after: X}
+ - {before: aa, after: Y}
+ r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}"
+
+ # gives
+
+- r:
+ - {X: A, bbb1: B, ccc1: C}
+ - {X: D, bbb2: E, ccc2: F}
+"""
+
+RETURN = r"""
+_value:
+ description: The list of dictionaries with replaced keys.
+ type: list
+ elements: dictionary
+"""
+
+from ansible_collections.community.general.plugins.plugin_utils.keys_filter import (
+ _keys_filter_params,
+ _keys_filter_target_dict)
+
+
+def replace_keys(data, target=None, matching_parameter='equal'):
+ """replace specific keys in a list of dictionaries"""
+
+ # test parameters
+ _keys_filter_params(data, matching_parameter)
+ # test and transform target
+ tz = _keys_filter_target_dict(target, matching_parameter)
+
+ if matching_parameter == 'equal':
+ def replace_key(key):
+ for b, a in tz:
+ if key == b:
+ return a
+ return key
+ elif matching_parameter == 'starts_with':
+ def replace_key(key):
+ for b, a in tz:
+ if key.startswith(b):
+ return a
+ return key
+ elif matching_parameter == 'ends_with':
+ def replace_key(key):
+ for b, a in tz:
+ if key.endswith(b):
+ return a
+ return key
+ elif matching_parameter == 'regex':
+ def replace_key(key):
+ for b, a in tz:
+ if b.match(key):
+ return a
+ return key
+
+ return [{replace_key(k): v for k, v in d.items()} for d in data]
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'replace_keys': replace_keys,
+ }
diff --git a/plugins/filter/reveal_ansible_type.py b/plugins/filter/reveal_ansible_type.py
new file mode 100644
index 0000000000..e068702355
--- /dev/null
+++ b/plugins/filter/reveal_ansible_type.py
@@ -0,0 +1,147 @@
+# Copyright (c) 2024 Vladimir Botka
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: reveal_ansible_type
+short_description: Return input type
+version_added: "9.2.0"
+author: Vladimir Botka (@vbotka)
+description: This filter returns input type.
+options:
+ _input:
+ description: Input data.
+ type: raw
+ required: true
+ alias:
+ description: Data type aliases.
+ default: {}
+ type: dictionary
+"""
+
+EXAMPLES = r"""
+# Substitution converts str to AnsibleUnicode or _AnsibleTaggedStr
+# ----------------------------------------------------------------
+
+# String. AnsibleUnicode or _AnsibleTaggedStr.
+- data: "abc"
+ result: '{{ data | community.general.reveal_ansible_type }}'
+# result => AnsibleUnicode (or _AnsibleTaggedStr)
+
+# String. AnsibleUnicode/_AnsibleTaggedStr alias str.
+- alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str"}
+ data: "abc"
+ result: '{{ data | community.general.reveal_ansible_type(alias) }}'
+# result => str
+
+# List. All items are AnsibleUnicode/_AnsibleTaggedStr.
+- data: ["a", "b", "c"]
+ result: '{{ data | community.general.reveal_ansible_type }}'
+# result => list[AnsibleUnicode] or list[_AnsibleTaggedStr]
+
+# Dictionary. All keys and values are AnsibleUnicode/_AnsibleTaggedStr.
+- data: {"a": "foo", "b": "bar", "c": "baz"}
+ result: '{{ data | community.general.reveal_ansible_type }}'
+# result => dict[AnsibleUnicode, AnsibleUnicode] or dict[_AnsibleTaggedStr, _AnsibleTaggedStr]
+
+# No substitution and no alias. Type of strings is str
+# ----------------------------------------------------
+
+# String
+- result: '{{ "abc" | community.general.reveal_ansible_type }}'
+# result => str
+
+# Integer
+- result: '{{ 123 | community.general.reveal_ansible_type }}'
+# result => int
+
+# Float
+- result: '{{ 123.45 | community.general.reveal_ansible_type }}'
+# result => float
+
+# Boolean
+- result: '{{ true | community.general.reveal_ansible_type }}'
+# result => bool
+
+# List. All items are strings.
+- result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}'
+# result => list[str]
+
+# List of dictionaries.
+- result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}'
+# result => list[dict]
+
+# Dictionary. All keys are strings. All values are integers.
+- result: '{{ {"a": 1} | community.general.reveal_ansible_type }}'
+# result => dict[str, int]
+
+# Dictionary. All keys are strings. All values are integers.
+- result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}'
+# result => dict[str, int]
+
+# Type of strings is AnsibleUnicode, _AnsibleTaggedStr, or str
+# ------------------------------------------------------------
+
+# Dictionary. The keys are integers or strings. All values are strings.
+- alias:
+ AnsibleUnicode: str
+ _AnsibleTaggedStr: str
+ _AnsibleTaggedInt: int
+ data: {1: 'a', 'b': 'b'}
+ result: '{{ data | community.general.reveal_ansible_type(alias) }}'
+# result => dict[int|str, str]
+
+# Dictionary. All keys are integers. All values are keys.
+- alias:
+ AnsibleUnicode: str
+ _AnsibleTaggedStr: str
+ _AnsibleTaggedInt: int
+ data: {1: 'a', 2: 'b'}
+ result: '{{ data | community.general.reveal_ansible_type(alias) }}'
+# result => dict[int, str]
+
+# Dictionary. All keys are strings. Multiple types values.
+- alias:
+ AnsibleUnicode: str
+ _AnsibleTaggedStr: str
+ _AnsibleTaggedInt: int
+ _AnsibleTaggedFloat: float
+ data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': true, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}}
+ result: '{{ data | community.general.reveal_ansible_type(alias) }}'
+# result => dict[str, bool|dict|float|int|list|str]
+
+# List. Multiple types items.
+- alias:
+ AnsibleUnicode: str
+ _AnsibleTaggedStr: str
+ _AnsibleTaggedInt: int
+ _AnsibleTaggedFloat: float
+ data: [1, 2, 1.1, 'abc', true, ['x', 'y', 'z'], {'x': 1, 'y': 2}]
+ result: '{{ data | community.general.reveal_ansible_type(alias) }}'
+# result => list[bool|dict|float|int|list|str]
+"""
+
+RETURN = r"""
+_value:
+ description: Type of the data.
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.plugin_utils.ansible_type import _ansible_type
+
+
+def reveal_ansible_type(data, alias=None):
+ """Returns data type"""
+
+ # TODO: expose use_native_type parameter
+ return _ansible_type(data, alias)
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'reveal_ansible_type': reveal_ansible_type
+ }
diff --git a/plugins/filter/time.py b/plugins/filter/time.py
index 25970cd260..e48e24216a 100644
--- a/plugins/filter/time.py
+++ b/plugins/filter/time.py
@@ -1,10 +1,8 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2020, René Moser
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
import re
from ansible.errors import AnsibleFilterError
@@ -57,10 +55,10 @@ def to_time_unit(human_time, unit='ms', **kwargs):
unit = unit_to_short_form.get(unit.rstrip('s'), unit)
if unit not in unit_factors:
- raise AnsibleFilterError("to_time_unit() can not convert to the following unit: %s. "
- "Available units (singular or plural): %s. "
- "Available short units: %s"
- % (unit, ', '.join(unit_to_short_form.keys()), ', '.join(unit_factors.keys())))
+ raise AnsibleFilterError((
+ f"to_time_unit() can not convert to the following unit: {unit}. Available units (singular or plural): "
+ f"{', '.join(unit_to_short_form.keys())}. Available short units: {', '.join(unit_factors.keys())}"
+ ))
if 'year' in kwargs:
unit_factors['y'] = unit_factors['y'][:-1] + [kwargs.pop('year')]
@@ -68,14 +66,14 @@ def to_time_unit(human_time, unit='ms', **kwargs):
unit_factors['mo'] = unit_factors['mo'][:-1] + [kwargs.pop('month')]
if kwargs:
- raise AnsibleFilterError('to_time_unit() got unknown keyword arguments: %s' % ', '.join(kwargs.keys()))
+ raise AnsibleFilterError(f"to_time_unit() got unknown keyword arguments: {', '.join(kwargs.keys())}")
result = 0
for h_time_string in human_time.split():
res = re.match(r'(-?\d+)(\w+)', h_time_string)
if not res:
raise AnsibleFilterError(
- "to_time_unit() can not interpret following string: %s" % human_time)
+ f"to_time_unit() can not interpret following string: {human_time}")
h_time_int = int(res.group(1))
h_time_unit = res.group(2)
@@ -83,7 +81,7 @@ def to_time_unit(human_time, unit='ms', **kwargs):
h_time_unit = unit_to_short_form.get(h_time_unit.rstrip('s'), h_time_unit)
if h_time_unit not in unit_factors:
raise AnsibleFilterError(
- "to_time_unit() can not interpret following string: %s" % human_time)
+ f"to_time_unit() can not interpret following string: {human_time}")
time_in_milliseconds = h_time_int * multiply(unit_factors[h_time_unit])
result += time_in_milliseconds
diff --git a/plugins/filter/to_days.yml b/plugins/filter/to_days.yml
index 19bc8faf23..c76697f1ee 100644
--- a/plugins/filter/to_days.yml
+++ b/plugins/filter/to_days.yml
@@ -5,7 +5,7 @@
DOCUMENTATION:
name: to_days
- short_description: Converte a duration string to days
+ short_description: Converts a duration string to days
version_added: 0.2.0
description:
- Parse a human readable time duration string and convert to days.
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/plugins/filter/to_hours.yml b/plugins/filter/to_hours.yml
index 83826a5908..520740897b 100644
--- a/plugins/filter/to_hours.yml
+++ b/plugins/filter/to_hours.yml
@@ -5,7 +5,7 @@
DOCUMENTATION:
name: to_hours
- short_description: Converte a duration string to hours
+ short_description: Converts a duration string to hours
version_added: 0.2.0
description:
- Parse a human readable time duration string and convert to hours.
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/plugins/filter/to_ini.py b/plugins/filter/to_ini.py
new file mode 100644
index 0000000000..a70740b8aa
--- /dev/null
+++ b/plugins/filter/to_ini.py
@@ -0,0 +1,100 @@
+
+# Copyright (c) 2023, Steffen Scheib
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: to_ini
+short_description: Converts a dictionary to the INI file format
+version_added: 8.2.0
+author: Steffen Scheib (@sscheib)
+description:
+ - Converts a dictionary to the INI file format.
+options:
+ _input:
+ description: The dictionary that should be converted to the INI format.
+ type: dictionary
+ required: true
+"""
+
+EXAMPLES = r"""
+- name: Define a dictionary
+ ansible.builtin.set_fact:
+ my_dict:
+ section_name:
+ key_name: 'key value'
+
+ another_section:
+ connection: 'ssh'
+
+- name: Write dictionary to INI file
+ ansible.builtin.copy:
+ dest: /tmp/test.ini
+ content: '{{ my_dict | community.general.to_ini }}'
+
+ # /tmp/test.ini will look like this:
+ # [section_name]
+ # key_name = key value
+ #
+ # [another_section]
+ # connection = ssh
+"""
+
+RETURN = r"""
+_value:
+ description: A string formatted as INI file.
+ type: string
+"""
+
+from collections.abc import Mapping
+from configparser import ConfigParser
+from io import StringIO
+from ansible.errors import AnsibleFilterError
+
+
+class IniParser(ConfigParser):
+ ''' Implements a configparser which sets the correct optionxform '''
+
+ def __init__(self):
+ super().__init__(interpolation=None)
+ self.optionxform = str
+
+
+def to_ini(obj):
+ ''' Read the given dict and return an INI formatted string '''
+
+ if not isinstance(obj, Mapping):
+ raise AnsibleFilterError(f'to_ini requires a dict, got {type(obj)}')
+
+ ini_parser = IniParser()
+
+ try:
+ ini_parser.read_dict(obj)
+ except Exception as ex:
+ raise AnsibleFilterError('to_ini failed to parse given dict:'
+ f'{ex}', orig_exc=ex)
+
+ # catching empty dicts
+ if obj == dict():
+ raise AnsibleFilterError('to_ini received an empty dict. '
+ 'An empty dict cannot be converted.')
+
+ config = StringIO()
+ ini_parser.write(config)
+
+ # config.getvalue() returns two \n at the end
+ # with the below insanity, we remove the very last character of
+ # the resulting string
+ return ''.join(config.getvalue().rsplit(config.getvalue()[-1], 1))
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+
+ return {
+ 'to_ini': to_ini
+ }
diff --git a/plugins/filter/to_milliseconds.yml b/plugins/filter/to_milliseconds.yml
index b6bb7e4be0..f25bd86623 100644
--- a/plugins/filter/to_milliseconds.yml
+++ b/plugins/filter/to_milliseconds.yml
@@ -5,7 +5,7 @@
DOCUMENTATION:
name: to_milliseconds
- short_description: Converte a duration string to milliseconds
+ short_description: Converts a duration string to milliseconds
version_added: 0.2.0
description:
- Parse a human readable time duration string and convert to milliseconds.
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/plugins/filter/to_minutes.yml b/plugins/filter/to_minutes.yml
index 3b85dadc43..924fb6feb3 100644
--- a/plugins/filter/to_minutes.yml
+++ b/plugins/filter/to_minutes.yml
@@ -5,7 +5,7 @@
DOCUMENTATION:
name: to_minutes
- short_description: Converte a duration string to minutes
+ short_description: Converts a duration string to minutes
version_added: 0.2.0
description:
- Parse a human readable time duration string and convert to minutes.
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/plugins/filter/to_months.yml b/plugins/filter/to_months.yml
index f13cee918e..09e9c38b5d 100644
--- a/plugins/filter/to_months.yml
+++ b/plugins/filter/to_months.yml
@@ -5,7 +5,7 @@
DOCUMENTATION:
name: to_months
- short_description: Converte a duration string to months
+ short_description: Converts a duration string to months
version_added: 0.2.0
description:
- Parse a human readable time duration string and convert to months.
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/plugins/filter/to_nice_yaml.yml b/plugins/filter/to_nice_yaml.yml
new file mode 100644
index 0000000000..fe7a316f46
--- /dev/null
+++ b/plugins/filter/to_nice_yaml.yml
@@ -0,0 +1,89 @@
+# Copyright (c) Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: to_nice_yaml
+ author:
+ - Ansible Core Team
+ - Felix Fontein (@felixfontein)
+ version_added: 11.3.0
+ short_description: Convert variable to YAML string
+ description:
+ - Converts an Ansible variable into a YAML string representation, without preserving vaulted strings as P(ansible.builtin.to_yaml#filter).
+ - This filter functions as a wrapper to the L(Python PyYAML library, https://pypi.org/project/PyYAML/)'s C(yaml.dump) function.
+ positional: _input
+ options:
+ _input:
+ description:
+ - A variable or expression that returns a data structure.
+ type: raw
+ required: true
+ indent:
+ description:
+ - Number of spaces to indent Python structures, mainly used for display to humans.
+ type: integer
+ default: 2
+ sort_keys:
+ description:
+ - Affects sorting of dictionary keys.
+ default: true
+ type: bool
+ default_style:
+ description:
+ - Indicates the style of the scalar.
+ choices:
+ - ''
+ - "'"
+ - '"'
+ - '|'
+ - '>'
+ type: string
+ canonical:
+ description:
+ - If set to V(true), export tag type to the output.
+ type: bool
+ width:
+ description:
+ - Set the preferred line width.
+ type: integer
+ line_break:
+ description:
+ - Specify the line break.
+ type: string
+ encoding:
+ description:
+ - Specify the output encoding.
+ type: string
+ explicit_start:
+ description:
+ - If set to V(true), adds an explicit start using C(---).
+ type: bool
+ explicit_end:
+ description:
+ - If set to V(true), adds an explicit end using C(...).
+ type: bool
+ redact_sensitive_values:
+ description:
+ - If set to V(true), vaulted strings are replaced by V() instead of being decrypted.
+ - With future ansible-core versions, this can extend to other strings tagged as sensitive.
+ - B(Note) that with ansible-core 2.18 and before this might not yield the expected result
+ since these versions of ansible-core strip the vault information away from strings that are
+ part of more complex data structures specified in C(vars).
+ type: bool
+ default: false
+ notes:
+ - More options may be available, see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details.
+ - >-
+ These parameters to C(yaml.dump) are not accepted, as they are overridden internally: O(ignore:allow_unicode).
+
+EXAMPLES: |
+ ---
+ # Dump variable in a template to create a YAML document
+ value: "{{ github_workflow | community.general.to_nice_yaml }}"
+
+RETURN:
+ _value:
+ description:
+ - The YAML serialized string representing the variable structure inputted.
+ type: string
diff --git a/plugins/filter/to_prettytable.py b/plugins/filter/to_prettytable.py
new file mode 100644
index 0000000000..266a426cf2
--- /dev/null
+++ b/plugins/filter/to_prettytable.py
@@ -0,0 +1,409 @@
+# Copyright (c) 2025, Timur Gadiev
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: to_prettytable
+short_description: Format a list of dictionaries as an ASCII table
+version_added: "10.7.0"
+author: Timur Gadiev (@tgadiev)
+description:
+ - This filter takes a list of dictionaries and formats it as an ASCII table using the I(prettytable) Python library.
+requirements:
+ - prettytable
+options:
+ _input:
+ description: A list of dictionaries to format.
+ type: list
+ elements: dictionary
+ required: true
+ column_order:
+ description: List of column names to specify the order of columns in the table.
+ type: list
+ elements: string
+ header_names:
+ description: List of custom header names to use instead of dictionary keys.
+ type: list
+ elements: string
+ column_alignments:
+ description:
+ - Dictionary where keys are column names and values are alignment settings. Valid alignment values are C(left), C(center),
+ C(right), C(l), C(c), or C(r).
+ - "For example, V({'name': 'left', 'id': 'right'}) aligns the C(name) column to the left and the C(id) column to the
+ right."
+ type: dictionary
+"""
+
+EXAMPLES = r"""
+- name: Set a list of users
+ ansible.builtin.set_fact:
+ users:
+ - name: Alice
+ age: 25
+ role: admin
+ - name: Bob
+ age: 30
+ role: user
+
+- name: Display a list of users as a table
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable
+ }}
+
+- name: Display a table with custom column ordering
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_order=['role', 'name', 'age']
+ )
+ }}
+
+- name: Display a table with selective column output (only show name and role fields)
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_order=['name', 'role']
+ )
+ }}
+
+- name: Display a table with custom headers
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ header_names=['User Name', 'User Age', 'User Role']
+ )
+ }}
+
+- name: Display a table with custom alignments
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_alignments={'name': 'center', 'age': 'right', 'role': 'left'}
+ )
+ }}
+
+- name: Combine multiple options
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_order=['role', 'name', 'age'],
+ header_names=['Position', 'Full Name', 'Years'],
+ column_alignments={'name': 'center', 'age': 'right', 'role': 'left'}
+ )
+ }}
+"""
+
+RETURN = r"""
+_value:
+ description: The formatted ASCII table.
+ type: string
+"""
+
+try:
+ import prettytable
+ HAS_PRETTYTABLE = True
+except ImportError:
+ HAS_PRETTYTABLE = False
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common.text.converters import to_text
+
+
+class TypeValidationError(AnsibleFilterError):
+ """Custom exception for type validation errors.
+
+ Args:
+ obj: The object with incorrect type
+ expected: Description of expected type
+ """
+ def __init__(self, obj, expected):
+ type_name = "string" if isinstance(obj, str) else type(obj).__name__
+ super().__init__(f"Expected {expected}, got a {type_name}")
+
+
+def _validate_list_param(param, param_name, ensure_strings=True):
+ """Validate a parameter is a list and optionally ensure all elements are strings.
+
+ Args:
+ param: The parameter to validate
+ param_name: The name of the parameter for error messages
+ ensure_strings: Whether to check that all elements are strings
+
+ Raises:
+ AnsibleFilterError: If validation fails
+ """
+ # Map parameter names to their original error message format
+ error_messages = {
+ "column_order": "a list of column names",
+ "header_names": "a list of header names"
+ }
+
+ # Use the specific error message if available, otherwise use a generic one
+ error_msg = error_messages.get(param_name, f"a list for {param_name}")
+
+ if not isinstance(param, list):
+ raise TypeValidationError(param, error_msg)
+
+ if ensure_strings:
+ for item in param:
+ if not isinstance(item, str):
+ # Maintain original error message format
+ if param_name == "column_order":
+ error_msg = "a string for column name"
+ elif param_name == "header_names":
+ error_msg = "a string for header name"
+ else:
+ error_msg = f"a string for {param_name} element"
+ raise TypeValidationError(item, error_msg)
+
+
+def _match_key(item_dict, lookup_key):
+ """Find a matching key in a dictionary, handling type conversion.
+
+ Args:
+ item_dict: Dictionary to search in
+ lookup_key: Key to look for, possibly needing type conversion
+
+ Returns:
+ The matching key or None if no match found
+ """
+ # Direct key match
+ if lookup_key in item_dict:
+ return lookup_key
+
+ # Try boolean conversion for 'true'/'false' strings
+ if isinstance(lookup_key, str):
+ if lookup_key.lower() == 'true' and True in item_dict:
+ return True
+ if lookup_key.lower() == 'false' and False in item_dict:
+ return False
+
+ # Try numeric conversion for string numbers
+ if lookup_key.isdigit() and int(lookup_key) in item_dict:
+ return int(lookup_key)
+
+ # No match found
+ return None
+
+
+def _build_key_maps(data):
+ """Build mappings between string keys and original keys.
+
+ Args:
+ data: List of dictionaries with keys to map
+
+ Returns:
+ Tuple of (key_map, reverse_key_map)
+ """
+ key_map = {}
+ reverse_key_map = {}
+
+ # Check if the data list is not empty
+ if not data:
+ return key_map, reverse_key_map
+
+ first_dict = data[0]
+ for orig_key in first_dict.keys():
+ # Store string version of the key
+ str_key = to_text(orig_key)
+ key_map[str_key] = orig_key
+ # Also store lowercase version for case-insensitive lookups
+ reverse_key_map[str_key.lower()] = orig_key
+
+ return key_map, reverse_key_map
+
+
+def _configure_alignments(table, field_names, column_alignments):
+ """Configure column alignments for the table.
+
+ Args:
+ table: The PrettyTable instance to configure
+ field_names: List of field names to align
+ column_alignments: Dict of column alignments
+ """
+ valid_alignments = {"left", "center", "right", "l", "c", "r"}
+
+ if not isinstance(column_alignments, dict):
+ return
+
+ for col_name, alignment in column_alignments.items():
+ if col_name in field_names:
+ # We already validated alignment is a string and a valid value in the main function
+ # Just apply it here
+ alignment = alignment.lower()
+ table.align[col_name] = alignment[0]
+
+
+def to_prettytable(data, *args, **kwargs):
+ """Convert a list of dictionaries to an ASCII table.
+
+ Args:
+ data: List of dictionaries to format
+ *args: Optional list of column names to specify column order
+ **kwargs: Optional keyword arguments:
+ - column_order: List of column names to specify the order
+ - header_names: List of custom header names
+ - column_alignments: Dict of column alignments (left, center, right)
+
+ Returns:
+ String containing the ASCII table
+ """
+ if not HAS_PRETTYTABLE:
+ raise AnsibleFilterError(
+ 'You need to install "prettytable" Python module to use this filter'
+ )
+
+ # === Input validation ===
+ # Validate list type
+ if not isinstance(data, list):
+ raise TypeValidationError(data, "a list of dictionaries")
+
+ # Validate dictionary items if list is not empty
+ if data and not all(isinstance(item, dict) for item in data):
+ invalid_item = next((item for item in data if not isinstance(item, dict)), None)
+ raise TypeValidationError(invalid_item, "all items in the list to be dictionaries")
+
+ # Get sample dictionary to determine fields - empty if no data
+ sample_dict = data[0] if data else {}
+ max_fields = len(sample_dict)
+
+ # === Process column order ===
+ # Handle both positional and keyword column_order
+ column_order = kwargs.pop('column_order', None)
+
+ # Check for conflict between args and column_order
+ if args and column_order is not None:
+ raise AnsibleFilterError("Cannot use both positional arguments and the 'column_order' keyword argument")
+
+ # Use positional args if provided
+ if args:
+ column_order = list(args)
+
+ # Validate column_order
+ if column_order is not None:
+ _validate_list_param(column_order, "column_order")
+
+ # Validate column_order doesn't exceed the number of fields (skip if data is empty)
+ if data and len(column_order) > max_fields:
+ raise AnsibleFilterError(
+ f"'column_order' has more elements ({len(column_order)}) than available fields in data ({max_fields})")
+
+ # === Process headers ===
+ # Determine field names and ensure they are strings
+ if column_order:
+ field_names = column_order
+ else:
+ # Use field names from first dictionary, ensuring all are strings
+ field_names = [to_text(k) for k in sample_dict]
+
+ # Process custom headers
+ header_names = kwargs.pop('header_names', None)
+ if header_names is not None:
+ _validate_list_param(header_names, "header_names")
+
+ # Validate header_names doesn't exceed the number of fields (skip if data is empty)
+ if data and len(header_names) > max_fields:
+ raise AnsibleFilterError(
+ f"'header_names' has more elements ({len(header_names)}) than available fields in data ({max_fields})")
+
+ # Validate that column_order and header_names have the same size if both provided
+ if column_order is not None and len(column_order) != len(header_names):
+ raise AnsibleFilterError(
+ f"'column_order' and 'header_names' must have the same number of elements. "
+ f"Got {len(column_order)} columns and {len(header_names)} headers.")
+
+ # === Process alignments ===
+ # Get column alignments and validate
+ column_alignments = kwargs.pop('column_alignments', {})
+ valid_alignments = {"left", "center", "right", "l", "c", "r"}
+
+ # Validate column_alignments is a dictionary
+ if not isinstance(column_alignments, dict):
+ raise TypeValidationError(column_alignments, "a dictionary for column_alignments")
+
+ # Validate column_alignments keys and values
+ for key, value in column_alignments.items():
+ # Check that keys are strings
+ if not isinstance(key, str):
+ raise TypeValidationError(key, "a string for column_alignments key")
+
+ # Check that values are strings
+ if not isinstance(value, str):
+ raise TypeValidationError(value, "a string for column_alignments value")
+
+ # Check that values are valid alignments
+ if value.lower() not in valid_alignments:
+ raise AnsibleFilterError(
+ f"Invalid alignment '{value}' in 'column_alignments'. "
+ f"Valid alignments are: {', '.join(sorted(valid_alignments))}")
+
+ # Validate column_alignments doesn't have more keys than fields (skip if data is empty)
+ if data and len(column_alignments) > max_fields:
+ raise AnsibleFilterError(
+ f"'column_alignments' has more elements ({len(column_alignments)}) than available fields in data ({max_fields})")
+
+ # Check for unknown parameters
+ if kwargs:
+ raise AnsibleFilterError(f"Unknown parameter(s) for to_prettytable filter: {', '.join(sorted(kwargs))}")
+
+ # === Build the table ===
+ table = prettytable.PrettyTable()
+
+ # Set the field names for display
+ display_names = header_names if header_names is not None else field_names
+ table.field_names = [to_text(name) for name in display_names]
+
+ # Configure alignments after setting field_names
+ _configure_alignments(table, display_names, column_alignments)
+
+ # Build key maps only if not using explicit column_order and we have data
+ key_map = {}
+ reverse_key_map = {}
+ if not column_order and data: # Only needed when using original dictionary keys and we have data
+ key_map, reverse_key_map = _build_key_maps(data)
+
+ # If we have an empty list with no custom parameters, return a simple empty table
+ if not data and not column_order and not header_names and not column_alignments:
+ return "++\n++"
+
+ # Process each row if we have data
+ for item in data:
+ row = []
+ for col in field_names:
+ # Try direct mapping first
+ if col in key_map:
+ row.append(item.get(key_map[col], ""))
+ else:
+ # Try to find a matching key in the item
+ matched_key = _match_key(item, col)
+ if matched_key is not None:
+ row.append(item.get(matched_key, ""))
+ else:
+ # Try case-insensitive lookup as last resort
+ lower_col = col.lower() if isinstance(col, str) else str(col).lower()
+ if lower_col in reverse_key_map:
+ row.append(item.get(reverse_key_map[lower_col], ""))
+ else:
+ # No match found
+ row.append("")
+ table.add_row(row)
+
+ return to_text(table)
+
+
+class FilterModule(object):
+ """Ansible core jinja2 filters."""
+
+ def filters(self):
+ return {
+ 'to_prettytable': to_prettytable
+ }
diff --git a/plugins/filter/to_seconds.yml b/plugins/filter/to_seconds.yml
index d6e6c4e467..49b69d6d69 100644
--- a/plugins/filter/to_seconds.yml
+++ b/plugins/filter/to_seconds.yml
@@ -5,7 +5,7 @@
DOCUMENTATION:
name: to_seconds
- short_description: Converte a duration string to seconds
+ short_description: Converts a duration string to seconds
version_added: 0.2.0
description:
- Parse a human readable time duration string and convert to seconds.
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/plugins/filter/to_time_unit.yml b/plugins/filter/to_time_unit.yml
index c0149f0acd..256ca573f4 100644
--- a/plugins/filter/to_time_unit.yml
+++ b/plugins/filter/to_time_unit.yml
@@ -5,7 +5,7 @@
DOCUMENTATION:
name: to_time_unit
- short_description: Converte a duration string to the given time unit
+ short_description: Converts a duration string to the given time unit
version_added: 0.2.0
description:
- Parse a human readable time duration string and convert to the given time unit.
@@ -14,12 +14,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
unit:
diff --git a/plugins/filter/to_weeks.yml b/plugins/filter/to_weeks.yml
index 499c386276..750e77c378 100644
--- a/plugins/filter/to_weeks.yml
+++ b/plugins/filter/to_weeks.yml
@@ -5,7 +5,7 @@
DOCUMENTATION:
name: to_weeks
- short_description: Converte a duration string to weeks
+ short_description: Converts a duration string to weeks
version_added: 0.2.0
description:
- Parse a human readable time duration string and convert to weeks.
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/plugins/filter/to_yaml.py b/plugins/filter/to_yaml.py
new file mode 100644
index 0000000000..905b04271c
--- /dev/null
+++ b/plugins/filter/to_yaml.py
@@ -0,0 +1,113 @@
+# Copyright (c) Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+import typing as t
+from collections.abc import Mapping, Set
+
+from yaml import dump
+try:
+ from yaml.cyaml import CSafeDumper as SafeDumper
+except ImportError:
+ from yaml import SafeDumper
+
+from ansible.module_utils.common.collections import is_sequence
+try:
+ # This is ansible-core 2.19+
+ from ansible.utils.vars import transform_to_native_types
+ from ansible.parsing.vault import VaultHelper, VaultLib
+except ImportError:
+ transform_to_native_types = None
+
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+from ansible.utils.unsafe_proxy import AnsibleUnsafe
+
+
+def _to_native_types_compat(value: t.Any, *, redact_value: str | None) -> t.Any:
+ """Compatibility function for ansible-core 2.18 and before."""
+ if value is None:
+ return value
+ if isinstance(value, AnsibleUnsafe):
+ # This only works up to ansible-core 2.18:
+ return _to_native_types_compat(value._strip_unsafe(), redact_value=redact_value)
+ # But that's fine, since this code path isn't taken on ansible-core 2.19+ anyway.
+ if isinstance(value, Mapping):
+ return {
+ _to_native_types_compat(key, redact_value=redact_value): _to_native_types_compat(val, redact_value=redact_value)
+ for key, val in value.items()
+ }
+ if isinstance(value, Set):
+ return {_to_native_types_compat(elt, redact_value=redact_value) for elt in value}
+ if is_sequence(value):
+ return [_to_native_types_compat(elt, redact_value=redact_value) for elt in value]
+ if isinstance(value, AnsibleVaultEncryptedUnicode):
+ if redact_value is not None:
+ return redact_value
+ # This only works up to ansible-core 2.18:
+ return value.data
+ # But that's fine, since this code path isn't taken on ansible-core 2.19+ anyway.
+ if isinstance(value, bytes):
+ return bytes(value)
+ if isinstance(value, str):
+ return str(value)
+
+ return value
+
+
+def _to_native_types(value: t.Any, *, redact: bool) -> t.Any:
+ if isinstance(value, Mapping):
+ return {_to_native_types(k, redact=redact): _to_native_types(v, redact=redact) for k, v in value.items()}
+ if is_sequence(value):
+ return [_to_native_types(e, redact=redact) for e in value]
+ if redact:
+ ciphertext = VaultHelper.get_ciphertext(value, with_tags=False)
+ if ciphertext and VaultLib.is_encrypted(ciphertext):
+ return ""
+ return transform_to_native_types(value, redact=redact)
+
+
+def remove_all_tags(value: t.Any, *, redact_sensitive_values: bool = False) -> t.Any:
+ """
+ Remove all tags from all values in the input.
+
+ If ``redact_sensitive_values`` is ``True``, all sensitive values will be redacted.
+ """
+ if transform_to_native_types is not None:
+ return _to_native_types(value, redact=redact_sensitive_values)
+
+ return _to_native_types_compat(
+ value,
+ redact_value="" if redact_sensitive_values else None, # same string as in ansible-core 2.19 by transform_to_native_types()
+ )
+
+
+def to_yaml(value: t.Any, *, redact_sensitive_values: bool = False, default_flow_style: bool | None = None, **kwargs) -> str:
+ """Serialize input as terse flow-style YAML."""
+ return dump(
+ remove_all_tags(value, redact_sensitive_values=redact_sensitive_values),
+ Dumper=SafeDumper,
+ allow_unicode=True,
+ default_flow_style=default_flow_style,
+ **kwargs,
+ )
+
+
+def to_nice_yaml(value: t.Any, *, redact_sensitive_values: bool = False, indent: int = 2, default_flow_style: bool = False, **kwargs) -> str:
+ """Serialize input as verbose multi-line YAML."""
+ return to_yaml(
+ value,
+ redact_sensitive_values=redact_sensitive_values,
+ default_flow_style=default_flow_style,
+ indent=indent,
+ **kwargs,
+ )
+
+
+class FilterModule(object):
+ def filters(self):
+ return {
+ 'to_yaml': to_yaml,
+ 'to_nice_yaml': to_nice_yaml,
+ }
diff --git a/plugins/filter/to_yaml.yml b/plugins/filter/to_yaml.yml
new file mode 100644
index 0000000000..066f8d990d
--- /dev/null
+++ b/plugins/filter/to_yaml.yml
@@ -0,0 +1,92 @@
+# Copyright (c) Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: to_yaml
+ author:
+ - Ansible Core Team
+ - Felix Fontein (@felixfontein)
+ version_added: 11.3.0
+ short_description: Convert variable to YAML string
+ description:
+ - Converts an Ansible variable into a YAML string representation, without preserving vaulted strings as P(ansible.builtin.to_yaml#filter).
+ - This filter functions as a wrapper to the L(Python PyYAML library, https://pypi.org/project/PyYAML/)'s C(yaml.dump) function.
+ positional: _input
+ options:
+ _input:
+ description:
+ - A variable or expression that returns a data structure.
+ type: raw
+ required: true
+ indent:
+ description:
+ - Number of spaces to indent Python structures, mainly used for display to humans.
+ type: integer
+ sort_keys:
+ description:
+ - Affects sorting of dictionary keys.
+ default: true
+ type: bool
+ default_style:
+ description:
+ - Indicates the style of the scalar.
+ choices:
+ - ''
+ - "'"
+ - '"'
+ - '|'
+ - '>'
+ type: string
+ canonical:
+ description:
+ - If set to V(true), export tag type to the output.
+ type: bool
+ width:
+ description:
+ - Set the preferred line width.
+ type: integer
+ line_break:
+ description:
+ - Specify the line break.
+ type: string
+ encoding:
+ description:
+ - Specify the output encoding.
+ type: string
+ explicit_start:
+ description:
+ - If set to V(true), adds an explicit start using C(---).
+ type: bool
+ explicit_end:
+ description:
+ - If set to V(true), adds an explicit end using C(...).
+ type: bool
+ redact_sensitive_values:
+ description:
+ - If set to V(true), vaulted strings are replaced by V() instead of being decrypted.
+ - With future ansible-core versions, this can extend to other strings tagged as sensitive.
+ - B(Note) that with ansible-core 2.18 and before this might not yield the expected result
+ since these versions of ansible-core strip the vault information away from strings that are
+ part of more complex data structures specified in C(vars).
+ type: bool
+ default: false
+ notes:
+ - More options may be available, see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details.
+ - >-
+ These parameters to C(yaml.dump) are not accepted, as they are overridden internally: O(ignore:allow_unicode).
+
+EXAMPLES: |
+ ---
+ # Dump variable in a template to create a YAML document
+ value: "{{ github_workflow | community.general.to_yaml }}"
+
+ ---
+ # Same as above but 'prettier' (equivalent to community.general.to_nice_yaml filter)
+ value: "{{ docker_config | community.general.to_yaml(indent=2) }}"
+
+RETURN:
+ _value:
+ description:
+ - The YAML serialized string representing the variable structure inputted.
+ type: string
diff --git a/plugins/filter/to_years.yml b/plugins/filter/to_years.yml
index 1a244a276f..62f282a8b6 100644
--- a/plugins/filter/to_years.yml
+++ b/plugins/filter/to_years.yml
@@ -5,7 +5,7 @@
DOCUMENTATION:
name: to_years
- short_description: Converte a duration string to years
+ short_description: Converts a duration string to years
version_added: 0.2.0
description:
- Parse a human readable time duration string and convert to years.
@@ -13,12 +13,12 @@ DOCUMENTATION:
_input:
description:
- The time string to convert.
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
- C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
- and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
- can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week,
+ V(d) and V(day) for a day, V(h) and V(hour) for an hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec)
+ and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s)
+ can be added to a unit as well, so V(seconds) is the same as V(second).
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
- - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ - Examples are V(1h), V(-5m), and V(3h -5m 6s).
type: string
required: true
year:
diff --git a/plugins/filter/unicode_normalize.py b/plugins/filter/unicode_normalize.py
index dfbf20c573..f1fe18402b 100644
--- a/plugins/filter/unicode_normalize.py
+++ b/plugins/filter/unicode_normalize.py
@@ -1,56 +1,58 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Andrew Pantuso (@ajpantuso)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: unicode_normalize
- short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms
- version_added: 3.7.0
- author: Andrew Pantuso (@Ajpantuso)
- description:
- - Normalizes unicode strings to facilitate comparison of characters with normalized forms.
- positional: form
- options:
- _input:
- description: A unicode string.
- type: string
- required: true
- form:
- description:
- - The normal form to use.
- - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details.
- type: string
- default: NFC
- choices:
- - NFC
- - NFD
- - NFKC
- - NFKD
-'''
+DOCUMENTATION = r"""
+name: unicode_normalize
+short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms
+version_added: 3.7.0
+author: Andrew Pantuso (@Ajpantuso)
+description:
+ - Normalizes unicode strings to facilitate comparison of characters with normalized forms.
+positional: form
+options:
+ _input:
+ description: A unicode string.
+ type: string
+ required: true
+ form:
+ description:
+ - The normal form to use.
+ - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details.
+ type: string
+ default: NFC
+ choices:
+ - NFC
+ - NFD
+ - NFKC
+ - NFKD
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Normalize unicode string
ansible.builtin.set_fact:
dictionary: "{{ 'ä' | community.general.unicode_normalize('NFKD') }}"
# The resulting string has length 2: one letter is 'a', the other
# the diacritic combiner.
-'''
+"""
-RETURN = '''
- _value:
- description: The normalized unicode string of the specified normal form.
- type: string
-'''
+RETURN = r"""
+_value:
+ description: The normalized unicode string of the specified normal form.
+ type: string
+"""
from unicodedata import normalize
-from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
-from ansible.module_utils.six import text_type
+from ansible.errors import AnsibleFilterError
+
+try:
+ from ansible.errors import AnsibleTypeError
+except ImportError:
+ from ansible.errors import AnsibleFilterTypeError as AnsibleTypeError
def unicode_normalize(data, form='NFC'):
@@ -65,11 +67,11 @@ def unicode_normalize(data, form='NFC'):
A normalized unicode string of the specified 'form'.
"""
- if not isinstance(data, text_type):
- raise AnsibleFilterTypeError("%s is not a valid input type" % type(data))
+ if not isinstance(data, str):
+ raise AnsibleTypeError(f"{type(data)} is not a valid input type")
if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'):
- raise AnsibleFilterError("%s is not a valid form" % form)
+ raise AnsibleFilterError(f"{form!r} is not a valid form")
return normalize(form, data)
diff --git a/plugins/filter/version_sort.py b/plugins/filter/version_sort.py
index 09eedbf563..893c7e5bd3 100644
--- a/plugins/filter/version_sort.py
+++ b/plugins/filter/version_sort.py
@@ -1,39 +1,37 @@
-# -*- coding: utf-8 -*-
# Copyright (C) 2021 Eric Lavarde
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: version_sort
- short_description: Sort a list according to version order instead of pure alphabetical one
- version_added: 2.2.0
- author: Eric L. (@ericzolf)
- description:
- - Sort a list according to version order instead of pure alphabetical one.
- options:
- _input:
- description: A list of strings to sort.
- type: list
- elements: string
- required: true
-'''
+DOCUMENTATION = r"""
+name: version_sort
+short_description: Sort a list according to version order instead of pure alphabetical one
+version_added: 2.2.0
+author: Eric L. (@ericzolf)
+description:
+ - Sort a list according to version order instead of pure alphabetical one.
+options:
+ _input:
+ description: A list of strings to sort.
+ type: list
+ elements: string
+ required: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Convert list of tuples into dictionary
ansible.builtin.set_fact:
dictionary: "{{ ['2.1', '2.10', '2.9'] | community.general.version_sort }}"
# Result is ['2.1', '2.9', '2.10']
-'''
+"""
-RETURN = '''
- _value:
- description: The list of strings sorted by version.
- type: list
- elements: string
-'''
+RETURN = r"""
+_value:
+ description: The list of strings sorted by version.
+ type: list
+ elements: string
+"""
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py
index b3288b27d4..7374193a74 100644
--- a/plugins/inventory/cobbler.py
+++ b/plugins/inventory/cobbler.py
@@ -1,103 +1,160 @@
-# -*- coding: utf-8 -*-
# Copyright (C) 2020 Orion Poplawski
# Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Orion Poplawski (@opoplawski)
- name: cobbler
- short_description: Cobbler inventory source
- version_added: 1.0.0
+DOCUMENTATION = r"""
+author: Orion Poplawski (@opoplawski)
+name: cobbler
+short_description: Cobbler inventory source
+version_added: 1.0.0
+description:
+ - Get inventory hosts from the cobbler service.
+ - 'Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin:
+ cobbler) entry.'
+ - Adds the primary IP addresses to C(cobbler_ipv4_address) and C(cobbler_ipv6_address) host variables if defined in Cobbler.
+ The primary IP address is defined as the management interface if defined, or the interface whose DNS name matches the
+ hostname of the system, or else the first interface found.
+extends_documentation_fragment:
+ - inventory_cache
+options:
+ plugin:
+ description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize
+ it as its own.
+ type: string
+ required: true
+ choices: ['cobbler', 'community.general.cobbler']
+ url:
+ description: URL to cobbler.
+ type: string
+ default: 'http://cobbler/cobbler_api'
+ env:
+ - name: COBBLER_SERVER
+ user:
+ description: Cobbler authentication user.
+ type: string
+ required: false
+ env:
+ - name: COBBLER_USER
+ password:
+ description: Cobbler authentication password.
+ type: string
+ required: false
+ env:
+ - name: COBBLER_PASSWORD
+ cache_fallback:
+ description: Fallback to cached results if connection to cobbler fails.
+ type: boolean
+ default: false
+ connection_timeout:
+ description: Timeout to connect to cobbler server.
+ type: int
+ required: false
+ version_added: 10.7.0
+ exclude_mgmt_classes:
+ description: Management classes to exclude from inventory.
+ type: list
+ default: []
+ elements: str
+ version_added: 7.4.0
+ exclude_profiles:
description:
- - Get inventory hosts from the cobbler service.
- - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and has a C(plugin: cobbler) entry."
- extends_documentation_fragment:
- - inventory_cache
- options:
- plugin:
- description: The name of this plugin, it should always be set to C(community.general.cobbler) for this plugin to recognize it as it's own.
- required: yes
- choices: [ 'cobbler', 'community.general.cobbler' ]
- url:
- description: URL to cobbler.
- default: 'http://cobbler/cobbler_api'
- env:
- - name: COBBLER_SERVER
- user:
- description: Cobbler authentication user.
- required: no
- env:
- - name: COBBLER_USER
- password:
- description: Cobbler authentication password
- required: no
- env:
- - name: COBBLER_PASSWORD
- cache_fallback:
- description: Fallback to cached results if connection to cobbler fails
- type: boolean
- default: no
- exclude_profiles:
- description:
- - Profiles to exclude from inventory.
- - Ignored if I(include_profiles) is specified.
- type: list
- default: []
- elements: str
- include_profiles:
- description:
- - Profiles to include from inventory.
- - If specified, all other profiles will be excluded.
- - I(exclude_profiles) is ignored if I(include_profiles) is specified.
- type: list
- default: []
- elements: str
- version_added: 4.4.0
- group_by:
- description: Keys to group hosts by
- type: list
- elements: string
- default: [ 'mgmt_classes', 'owners', 'status' ]
- group:
- description: Group to place all hosts into
- default: cobbler
- group_prefix:
- description: Prefix to apply to cobbler groups
- default: cobbler_
- want_facts:
- description: Toggle, if C(true) the plugin will retrieve host facts from the server
- type: boolean
- default: yes
-'''
+ - Profiles to exclude from inventory.
+ - Ignored if O(include_profiles) is specified.
+ type: list
+ default: []
+ elements: str
+ include_mgmt_classes:
+ description: Management classes to include from inventory.
+ type: list
+ default: []
+ elements: str
+ version_added: 7.4.0
+ include_profiles:
+ description:
+ - Profiles to include from inventory.
+ - If specified, all other profiles are excluded.
+ - O(exclude_profiles) is ignored if O(include_profiles) is specified.
+ type: list
+ default: []
+ elements: str
+ version_added: 4.4.0
+ inventory_hostname:
+ description:
+ - What to use for the ansible inventory hostname.
+ - By default the networking hostname is used if defined, otherwise the DNS name of the management or first non-static
+ interface.
+ - If set to V(system), the cobbler system name is used.
+ type: str
+ choices: ['hostname', 'system']
+ default: hostname
+ version_added: 7.1.0
+ group_by:
+ description: Keys to group hosts by.
+ type: list
+ elements: string
+ default: ['mgmt_classes', 'owners', 'status']
+ group:
+ description: Group to place all hosts into.
+ default: cobbler
+ group_prefix:
+ description: Prefix to apply to cobbler groups.
+ default: cobbler_
+ want_facts:
+ description: Toggle, if V(true) the plugin retrieves all host facts from the server.
+ type: boolean
+ default: true
+ want_ip_addresses:
+ description:
+ - Toggle, if V(true) the plugin adds a C(cobbler_ipv4_addresses) and C(cobbler_ipv6_addresses) dictionary to the
+ defined O(group) mapping interface DNS names to IP addresses.
+ type: boolean
+ default: true
+ version_added: 7.1.0
+ facts_level:
+ description:
+ - Set to V(normal) to gather only system-level variables.
+ - Set to V(as_rendered) to gather all variables as rolled up by Cobbler.
+ type: string
+ choices: ['normal', 'as_rendered']
+ default: normal
+ version_added: 10.7.0
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# my.cobbler.yml
plugin: community.general.cobbler
url: http://cobbler/cobbler_api
user: ansible-tester
password: secure
-'''
+"""
import socket
from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.six import iteritems
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
+
# xmlrpc
try:
- import xmlrpclib as xmlrpc_client
+ import xmlrpc.client as xmlrpc_client
HAS_XMLRPC_CLIENT = True
except ImportError:
- try:
- import xmlrpc.client as xmlrpc_client
- HAS_XMLRPC_CLIENT = True
- except ImportError:
- HAS_XMLRPC_CLIENT = False
+ HAS_XMLRPC_CLIENT = False
+
+
+class TimeoutTransport (xmlrpc_client.SafeTransport):
+ def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
+ super(TimeoutTransport, self).__init__()
+ self._timeout = timeout
+ self.context = None
+
+ def make_connection(self, host):
+ conn = xmlrpc_client.SafeTransport.make_connection(self, host)
+ conn.timeout = self._timeout
+ return conn
class InventoryModule(BaseInventoryPlugin, Cacheable):
@@ -108,7 +165,9 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
def __init__(self):
super(InventoryModule, self).__init__()
self.cache_key = None
- self.connection = None
+
+ if not HAS_XMLRPC_CLIENT:
+ raise AnsibleError('Could not import xmlrpc client library')
def verify_file(self, path):
valid = False
@@ -119,18 +178,6 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" nor "cobbler.yml"')
return valid
- def _get_connection(self):
- if not HAS_XMLRPC_CLIENT:
- raise AnsibleError('Could not import xmlrpc client library')
-
- if self.connection is None:
- self.display.vvvv('Connecting to %s\n' % self.cobbler_url)
- self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
- self.token = None
- if self.get_option('user') is not None:
- self.token = self.connection.login(self.get_option('user'), self.get_option('password'))
- return self.connection
-
def _init_cache(self):
if self.cache_key not in self._cache:
self._cache[self.cache_key] = {}
@@ -144,12 +191,11 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
def _get_profiles(self):
if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}):
- c = self._get_connection()
try:
if self.token is not None:
- data = c.get_profiles(self.token)
+ data = self.cobbler.get_profiles(self.token)
else:
- data = c.get_profiles()
+ data = self.cobbler.get_profiles()
except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
self._reload_cache()
else:
@@ -160,12 +206,20 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
def _get_systems(self):
if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}):
- c = self._get_connection()
try:
if self.token is not None:
- data = c.get_systems(self.token)
+ data = self.cobbler.get_systems(self.token)
else:
- data = c.get_systems()
+ data = self.cobbler.get_systems()
+
+ # If more facts are requested, gather them all from Cobbler
+ if self.facts_level == "as_rendered":
+ for i, host in enumerate(data):
+ self.display.vvvv(f"Gathering all facts for {host['name']}\n")
+ if self.token is not None:
+ data[i] = self.cobbler.get_system_as_rendered(host['name'], self.token)
+ else:
+ data[i] = self.cobbler.get_system_as_rendered(host['name'])
except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
self._reload_cache()
else:
@@ -175,7 +229,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
return self._cache[self.cache_key]['systems']
def _add_safe_group_name(self, group, child=None):
- group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", ""))))
+ group_name = self.inventory.add_group(to_safe_group_name(f"{self.get_option('group_prefix')}{group.lower().replace(' ', '')}"))
if child is not None:
self.inventory.add_child(group_name, child)
return group_name
@@ -195,25 +249,40 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
# get connection host
self.cobbler_url = self.get_option('url')
+ self.display.vvvv(f'Connecting to {self.cobbler_url}\n')
+
+ if 'connection_timeout' in self._options:
+ self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True,
+ transport=TimeoutTransport(timeout=self.get_option('connection_timeout')))
+ else:
+ self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
+ self.token = None
+ if self.get_option('user') is not None:
+ self.token = self.cobbler.login(str(self.get_option('user')), str(self.get_option('password')))
+
self.cache_key = self.get_cache_key(path)
self.use_cache = cache and self.get_option('cache')
+ self.exclude_mgmt_classes = self.get_option('exclude_mgmt_classes')
+ self.include_mgmt_classes = self.get_option('include_mgmt_classes')
self.exclude_profiles = self.get_option('exclude_profiles')
self.include_profiles = self.get_option('include_profiles')
self.group_by = self.get_option('group_by')
+ self.inventory_hostname = self.get_option('inventory_hostname')
+ self.facts_level = self.get_option('facts_level')
for profile in self._get_profiles():
if profile['parent']:
- self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent']))
+ self.display.vvvv(f"Processing profile {profile['name']} with parent {profile['parent']}\n")
if not self._exclude_profile(profile['parent']):
parent_group_name = self._add_safe_group_name(profile['parent'])
- self.display.vvvv('Added profile parent group %s\n' % parent_group_name)
+ self.display.vvvv(f'Added profile parent group {parent_group_name}\n')
if not self._exclude_profile(profile['name']):
group_name = self._add_safe_group_name(profile['name'])
- self.display.vvvv('Added profile group %s\n' % group_name)
+ self.display.vvvv(f'Added profile group {group_name}\n')
self.inventory.add_child(parent_group_name, group_name)
else:
- self.display.vvvv('Processing profile %s without parent\n' % profile['name'])
+ self.display.vvvv(f"Processing profile {profile['name']} without parent\n")
# Create a hierarchy of profile names
profile_elements = profile['name'].split('-')
i = 0
@@ -221,12 +290,12 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
profile_group = '-'.join(profile_elements[0:i + 1])
profile_group_child = '-'.join(profile_elements[0:i + 2])
if self._exclude_profile(profile_group):
- self.display.vvvv('Excluding profile %s\n' % profile_group)
+ self.display.vvvv(f'Excluding profile {profile_group}\n')
break
group_name = self._add_safe_group_name(profile_group)
- self.display.vvvv('Added profile group %s\n' % group_name)
+ self.display.vvvv(f'Added profile group {group_name}\n')
child_group_name = self._add_safe_group_name(profile_group_child)
- self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name))
+ self.display.vvvv(f'Added profile child group {child_group_name} to {group_name}\n')
self.inventory.add_child(group_name, child_group_name)
i = i + 1
@@ -234,54 +303,112 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.group = to_safe_group_name(self.get_option('group'))
if self.group is not None and self.group != '':
self.inventory.add_group(self.group)
- self.display.vvvv('Added site group %s\n' % self.group)
+ self.display.vvvv(f'Added site group {self.group}\n')
+ ip_addresses = {}
+ ipv6_addresses = {}
for host in self._get_systems():
# Get the FQDN for the host and add it to the right groups
- hostname = host['hostname'] # None
+ if self.inventory_hostname == 'system':
+ hostname = make_unsafe(host['name']) # None
+ else:
+ hostname = make_unsafe(host['hostname']) # None
interfaces = host['interfaces']
- if self._exclude_profile(host['profile']):
- self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
- continue
+ if set(host['mgmt_classes']) & set(self.include_mgmt_classes):
+ self.display.vvvv(f"Including host {host['name']} in mgmt_classes {host['mgmt_classes']}\n")
+ else:
+ if self._exclude_profile(host['profile']):
+ self.display.vvvv(f"Excluding host {host['name']} in profile {host['profile']}\n")
+ continue
+
+ if set(host['mgmt_classes']) & set(self.exclude_mgmt_classes):
+ self.display.vvvv(f"Excluding host {host['name']} in mgmt_classes {host['mgmt_classes']}\n")
+ continue
# hostname is often empty for non-static IP hosts
if hostname == '':
- for (iname, ivalue) in iteritems(interfaces):
+ for iname, ivalue in interfaces.items():
if ivalue['management'] or not ivalue['static']:
this_dns_name = ivalue.get('dns_name', None)
if this_dns_name is not None and this_dns_name != "":
- hostname = this_dns_name
- self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname))
+ hostname = make_unsafe(this_dns_name)
+ self.display.vvvv(f'Set hostname to {hostname} from {iname}\n')
if hostname == '':
- self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name'])
+ self.display.vvvv(f"Cannot determine hostname for host {host['name']}, skipping\n")
continue
self.inventory.add_host(hostname)
- self.display.vvvv('Added host %s hostname %s\n' % (host['name'], hostname))
+ self.display.vvvv(f"Added host {host['name']} hostname {hostname}\n")
# Add host to profile group
- group_name = self._add_safe_group_name(host['profile'], child=hostname)
- self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name))
+ if host['profile'] != '':
+ group_name = self._add_safe_group_name(host['profile'], child=hostname)
+ self.display.vvvv(f'Added host {hostname} to profile group {group_name}\n')
+ else:
+ self.display.warning(f'Host {hostname} has an empty profile\n')
# Add host to groups specified by group_by fields
for group_by in self.group_by:
- if host[group_by] == '<>':
+ if host[group_by] == '<>' or host[group_by] == '':
groups = []
else:
groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by]
for group in groups:
group_name = self._add_safe_group_name(group, child=hostname)
- self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name))
+ self.display.vvvv(f'Added host {hostname} to group_by {group_by} group {group_name}\n')
# Add to group for this inventory
if self.group is not None:
self.inventory.add_child(self.group, hostname)
# Add host variables
+ ip_address = None
+ ip_address_first = None
+ ipv6_address = None
+ ipv6_address_first = None
+ for iname, ivalue in interfaces.items():
+ # Set to first interface or management interface if defined or hostname matches dns_name
+ if ivalue['ip_address'] != "":
+ if ip_address_first is None:
+ ip_address_first = ivalue['ip_address']
+ if ivalue['management']:
+ ip_address = ivalue['ip_address']
+ elif ivalue['dns_name'] == hostname and ip_address is None:
+ ip_address = ivalue['ip_address']
+ if ivalue['ipv6_address'] != "":
+ if ipv6_address_first is None:
+ ipv6_address_first = ivalue['ipv6_address']
+ if ivalue['management']:
+ ipv6_address = ivalue['ipv6_address']
+ elif ivalue['dns_name'] == hostname and ipv6_address is None:
+ ipv6_address = ivalue['ipv6_address']
+
+ # Collect all interface name mappings for adding to group vars
+ if self.get_option('want_ip_addresses'):
+ if ivalue['dns_name'] != "":
+ if ivalue['ip_address'] != "":
+ ip_addresses[ivalue['dns_name']] = ivalue['ip_address']
+ if ivalue['ipv6_address'] != "":
+ ip_addresses[ivalue['dns_name']] = ivalue['ipv6_address']
+
+ # Add ip_address to host if defined, use first if no management or matched dns_name
+ if ip_address is None and ip_address_first is not None:
+ ip_address = ip_address_first
+ if ip_address is not None:
+ self.inventory.set_variable(hostname, 'cobbler_ipv4_address', make_unsafe(ip_address))
+ if ipv6_address is None and ipv6_address_first is not None:
+ ipv6_address = ipv6_address_first
+ if ipv6_address is not None:
+ self.inventory.set_variable(hostname, 'cobbler_ipv6_address', make_unsafe(ipv6_address))
+
if self.get_option('want_facts'):
try:
- self.inventory.set_variable(hostname, 'cobbler', host)
+ self.inventory.set_variable(hostname, 'cobbler', make_unsafe(host))
except ValueError as e:
- self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e)))
+ self.display.warning(f"Could not set host info for {hostname}: {e}")
+
+ if self.get_option('want_ip_addresses'):
+ self.inventory.set_variable(self.group, 'cobbler_ipv4_addresses', make_unsafe(ip_addresses))
+ self.inventory.set_variable(self.group, 'cobbler_ipv6_addresses', make_unsafe(ipv6_addresses))
diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py
index 8279d8e781..4a2b32680e 100644
--- a/plugins/inventory/gitlab_runners.py
+++ b/plugins/inventory/gitlab_runners.py
@@ -1,72 +1,71 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018, Stefan Heitmueller
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
-__metaclass__ = type
-DOCUMENTATION = '''
- name: gitlab_runners
- author:
- - Stefan Heitmüller (@morph027)
- short_description: Ansible dynamic inventory plugin for GitLab runners.
- requirements:
- - python >= 2.7
- - python-gitlab > 1.8.0
- extends_documentation_fragment:
- - constructed
- description:
- - Reads inventories from the GitLab API.
- - Uses a YAML configuration file gitlab_runners.[yml|yaml].
- options:
- plugin:
- description: The name of this plugin, it should always be set to 'gitlab_runners' for this plugin to recognize it as it's own.
- type: str
- required: true
- choices:
- - gitlab_runners
- - community.general.gitlab_runners
- server_url:
- description: The URL of the GitLab server, with protocol (i.e. http or https).
- env:
- - name: GITLAB_SERVER_URL
- version_added: 1.0.0
- type: str
- required: true
- api_token:
- description: GitLab token for logging in.
- env:
- - name: GITLAB_API_TOKEN
- version_added: 1.0.0
- type: str
- aliases:
- - private_token
- - access_token
- filter:
- description: filter runners from GitLab API
- env:
- - name: GITLAB_FILTER
- version_added: 1.0.0
- type: str
- choices: ['active', 'paused', 'online', 'specific', 'shared']
- verbose_output:
- description: Toggle to (not) include all available nodes metadata
- type: bool
- default: yes
-'''
+DOCUMENTATION = r"""
+name: gitlab_runners
+author:
+ - Stefan Heitmüller (@morph027)
+short_description: Ansible dynamic inventory plugin for GitLab runners
+requirements:
+ - python-gitlab > 1.8.0
+extends_documentation_fragment:
+ - constructed
+description:
+ - Reads inventories from the GitLab API.
+ - Uses a YAML configuration file gitlab_runners.[yml|yaml].
+options:
+ plugin:
+ description: The name of this plugin, it should always be set to V(gitlab_runners) for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices:
+ - gitlab_runners
+ - community.general.gitlab_runners
+ server_url:
+ description: The URL of the GitLab server, with protocol (i.e. http or https).
+ env:
+ - name: GITLAB_SERVER_URL
+ version_added: 1.0.0
+ type: str
+ required: true
+ api_token:
+ description: GitLab token for logging in.
+ env:
+ - name: GITLAB_API_TOKEN
+ version_added: 1.0.0
+ type: str
+ aliases:
+ - private_token
+ - access_token
+ filter:
+ description: Filter runners from GitLab API.
+ env:
+ - name: GITLAB_FILTER
+ version_added: 1.0.0
+ type: str
+ choices: ['active', 'paused', 'online', 'specific', 'shared']
+ verbose_output:
+ description: Toggle to (not) include all available nodes metadata.
+ type: bool
+ default: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
+---
# gitlab_runners.yml
plugin: community.general.gitlab_runners
host: https://gitlab.com
+---
# Example using constructed features to create groups and set ansible_host
plugin: community.general.gitlab_runners
host: https://gitlab.com
-strict: False
+strict: false
keyed_groups:
# add e.g. amd64 hosts to an arch_amd64 group
- prefix: arch
@@ -79,12 +78,13 @@ keyed_groups:
# hint: labels containing special characters will be converted to safe names
- key: 'tag_list'
prefix: tag
-'''
+"""
from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
+
try:
import gitlab
HAS_GITLAB = True
@@ -106,11 +106,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
else:
runners = gl.runners.all()
for runner in runners:
- host = str(runner['id'])
+ host = make_unsafe(str(runner['id']))
ip_address = runner['ip_address']
- host_attrs = vars(gl.runners.get(runner['id']))['_attrs']
+ host_attrs = make_unsafe(vars(gl.runners.get(runner['id']))['_attrs'])
self.inventory.add_host(host, group='gitlab_runners')
- self.inventory.set_variable(host, 'ansible_host', ip_address)
+ self.inventory.set_variable(host, 'ansible_host', make_unsafe(ip_address))
if self.get_option('verbose_output', True):
self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs)
@@ -123,7 +123,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict)
except Exception as e:
- raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e))
+ raise AnsibleParserError(f'Unable to fetch hosts from GitLab API, this was the original exception: {e}')
def verify_file(self, path):
"""Return the possibly of a file being consumable by this plugin."""
diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py
index 70e0f57332..017959f403 100644
--- a/plugins/inventory/icinga2.py
+++ b/plugins/inventory/icinga2.py
@@ -1,78 +1,81 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cliff Hults
# Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
-__metaclass__ = type
-DOCUMENTATION = '''
- name: icinga2
- short_description: Icinga2 inventory source
- version_added: 3.7.0
- author:
- - Cliff Hults (@BongoEADGC6)
+DOCUMENTATION = r"""
+name: icinga2
+short_description: Icinga2 inventory source
+version_added: 3.7.0
+author:
+ - Cliff Hults (@BongoEADGC6)
+description:
+ - Get inventory hosts from the Icinga2 API.
+  - Uses a configuration file as an inventory source; it must end in C(.icinga2.yml) or C(.icinga2.yaml).
+extends_documentation_fragment:
+ - constructed
+options:
+ strict:
+ version_added: 4.4.0
+ compose:
+ version_added: 4.4.0
+ groups:
+ version_added: 4.4.0
+ keyed_groups:
+ version_added: 4.4.0
+ plugin:
+ description: Name of the plugin.
+ required: true
+ type: string
+ choices: ['community.general.icinga2']
+ url:
+ description: Root URL of Icinga2 API.
+ type: string
+ required: true
+ user:
+ description: Username to query the API.
+ type: string
+ required: true
+ password:
+ description: Password to query the API.
+ type: string
+ required: true
+ host_filter:
description:
- - Get inventory hosts from the Icinga2 API.
- - "Uses a configuration file as an inventory source, it must end in
- C(.icinga2.yml) or C(.icinga2.yaml)."
- extends_documentation_fragment:
- - constructed
- options:
- strict:
- version_added: 4.4.0
- compose:
- version_added: 4.4.0
- groups:
- version_added: 4.4.0
- keyed_groups:
- version_added: 4.4.0
- plugin:
- description: Name of the plugin.
- required: true
- type: string
- choices: ['community.general.icinga2']
- url:
- description: Root URL of Icinga2 API.
- type: string
- required: true
- user:
- description: Username to query the API.
- type: string
- required: true
- password:
- description: Password to query the API.
- type: string
- required: true
- host_filter:
- description:
- - An Icinga2 API valid host filter. Leave blank for no filtering
- type: string
- required: false
- validate_certs:
- description: Enables or disables SSL certificate verification.
- type: boolean
- default: true
- inventory_attr:
- description:
- - Allows the override of the inventory name based on different attributes.
- - This allows for changing the way limits are used.
- - The current default, C(address), is sometimes not unique or present. We recommend to use C(name) instead.
- type: string
- default: address
- choices: ['name', 'display_name', 'address']
- version_added: 4.2.0
-'''
+ - An Icinga2 API valid host filter. Leave blank for no filtering.
+ type: string
+ required: false
+ validate_certs:
+ description: Enables or disables SSL certificate verification.
+ type: boolean
+ default: true
+ inventory_attr:
+ description:
+ - Allows the override of the inventory name based on different attributes.
+ - This allows for changing the way limits are used.
+ - The current default, V(address), is sometimes not unique or present. We recommend to use V(name) instead.
+ type: string
+ default: address
+ choices: ['name', 'display_name', 'address']
+ version_added: 4.2.0
+ group_by_hostgroups:
+ description:
+ - Uses Icinga2 hostgroups as groups.
+ type: boolean
+ default: true
+ version_added: 8.4.0
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# my.icinga2.yml
plugin: community.general.icinga2
url: http://localhost:5665
user: ansible
password: secure
host_filter: \"linux-servers\" in host.groups
-validate_certs: false
+validate_certs: false # only do this when connecting to localhost!
inventory_attr: name
groups:
# simple name matching
@@ -88,14 +91,16 @@ compose:
# set 'ansible_user' and 'ansible_port' from icinga2 host vars
ansible_user: icinga2_attributes.vars.ansible_user
ansible_port: icinga2_attributes.vars.ansible_port | default(22)
-'''
+"""
import json
+from urllib.error import HTTPError
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.module_utils.urls import open_url
-from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
class InventoryModule(BaseInventoryPlugin, Constructable):
@@ -114,6 +119,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
self.ssl_verify = None
self.host_filter = None
self.inventory_attr = None
+ self.group_by_hostgroups = None
self.cache_key = None
self.use_cache = None
@@ -132,7 +138,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
'User-Agent': "ansible-icinga2-inv",
'Accept': "application/json",
}
- api_status_url = self.icinga2_url + "/status"
+ api_status_url = f"{self.icinga2_url}/status"
request_args = {
'headers': self.headers,
'url_username': self.icinga2_user,
@@ -142,7 +148,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
open_url(api_status_url, **request_args)
def _post_request(self, request_url, data=None):
- self.display.vvv("Requested URL: %s" % request_url)
+ self.display.vvv(f"Requested URL: {request_url}")
request_args = {
'headers': self.headers,
'url_username': self.icinga2_user,
@@ -151,42 +157,38 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
}
if data is not None:
request_args['data'] = json.dumps(data)
- self.display.vvv("Request Args: %s" % request_args)
+ self.display.vvv(f"Request Args: {request_args}")
try:
response = open_url(request_url, **request_args)
except HTTPError as e:
try:
error_body = json.loads(e.read().decode())
- self.display.vvv("Error returned: {0}".format(error_body))
+ self.display.vvv(f"Error returned: {error_body}")
except Exception:
error_body = {"status": None}
if e.code == 404 and error_body.get('status') == "No objects found.":
raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid")
- raise AnsibleParserError("Unexpected data returned: {0} -- {1}".format(e, error_body))
+ raise AnsibleParserError(f"Unexpected data returned: {e} -- {error_body}")
response_body = response.read()
json_data = json.loads(response_body.decode('utf-8'))
- self.display.vvv("Returned Data: %s" % json.dumps(json_data, indent=4, sort_keys=True))
+ self.display.vvv(f"Returned Data: {json.dumps(json_data, indent=4, sort_keys=True)}")
if 200 <= response.status <= 299:
return json_data
if response.status == 404 and json_data['status'] == "No objects found.":
raise AnsibleParserError(
- "API returned no data -- Response: %s - %s"
- % (response.status, json_data['status']))
+ f"API returned no data -- Response: {response.status} - {json_data['status']}")
if response.status == 401:
raise AnsibleParserError(
- "API was unable to complete query -- Response: %s - %s"
- % (response.status, json_data['status']))
+ f"API was unable to complete query -- Response: {response.status} - {json_data['status']}")
if response.status == 500:
raise AnsibleParserError(
- "API Response - %s - %s"
- % (json_data['status'], json_data['errors']))
+ f"API Response - {json_data['status']} - {json_data['errors']}")
raise AnsibleParserError(
- "Unexpected data returned - %s - %s"
- % (json_data['status'], json_data['errors']))
+ f"Unexpected data returned - {json_data['status']} - {json_data['errors']}")
def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None):
- query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url)
+ query_hosts_url = f"{self.icinga2_url}/objects/hosts"
self.headers['X-HTTP-Method-Override'] = 'GET'
data_dict = dict()
if hosts:
@@ -233,31 +235,32 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
"""Convert Icinga2 API data to JSON format for Ansible"""
groups_dict = {"_meta": {"hostvars": {}}}
for entry in json_data:
- host_attrs = entry['attrs']
+ host_attrs = make_unsafe(entry['attrs'])
if self.inventory_attr == "name":
- host_name = entry.get('name')
+ host_name = make_unsafe(entry.get('name'))
if self.inventory_attr == "address":
# When looking for address for inventory, if missing fallback to object name
if host_attrs.get('address', '') != '':
- host_name = host_attrs.get('address')
+ host_name = make_unsafe(host_attrs.get('address'))
else:
- host_name = entry.get('name')
+ host_name = make_unsafe(entry.get('name'))
if self.inventory_attr == "display_name":
host_name = host_attrs.get('display_name')
if host_attrs['state'] == 0:
host_attrs['state'] = 'on'
else:
host_attrs['state'] = 'off'
- host_groups = host_attrs.get('groups')
self.inventory.add_host(host_name)
- for group in host_groups:
- if group not in self.inventory.groups.keys():
- self.inventory.add_group(group)
- self.inventory.add_child(group, host_name)
+ if self.group_by_hostgroups:
+ host_groups = host_attrs.get('groups')
+ for group in host_groups:
+ if group not in self.inventory.groups.keys():
+ self.inventory.add_group(group)
+ self.inventory.add_child(group, host_name)
# If the address attribute is populated, override ansible_host with the value
if host_attrs.get('address') != '':
self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address'))
- self.inventory.set_variable(host_name, 'hostname', entry.get('name'))
+ self.inventory.set_variable(host_name, 'hostname', make_unsafe(entry.get('name')))
self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name'))
self.inventory.set_variable(host_name, 'state',
host_attrs['state'])
@@ -277,12 +280,23 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
self._read_config_data(path)
# Store the options from the YAML file
- self.icinga2_url = self.get_option('url').rstrip('/') + '/v1'
+ self.icinga2_url = self.get_option('url')
self.icinga2_user = self.get_option('user')
self.icinga2_password = self.get_option('password')
self.ssl_verify = self.get_option('validate_certs')
self.host_filter = self.get_option('host_filter')
self.inventory_attr = self.get_option('inventory_attr')
+ self.group_by_hostgroups = self.get_option('group_by_hostgroups')
+
+ if self.templar.is_template(self.icinga2_url):
+ self.icinga2_url = self.templar.template(variable=self.icinga2_url)
+ if self.templar.is_template(self.icinga2_user):
+ self.icinga2_user = self.templar.template(variable=self.icinga2_user)
+ if self.templar.is_template(self.icinga2_password):
+ self.icinga2_password = self.templar.template(variable=self.icinga2_password)
+
+ self.icinga2_url = f"{self.icinga2_url.rstrip('/')}/v1"
+
# Not currently enabled
# self.cache_key = self.get_cache_key(path)
# self.use_cache = cache and self.get_option('cache')
diff --git a/plugins/inventory/iocage.py b/plugins/inventory/iocage.py
new file mode 100644
index 0000000000..9d4cef4a03
--- /dev/null
+++ b/plugins/inventory/iocage.py
@@ -0,0 +1,418 @@
+
+# Copyright (c) 2024 Vladimir Botka
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: iocage
+short_description: C(iocage) inventory source
+version_added: 10.2.0
+author:
+ - Vladimir Botka (@vbotka)
+requirements:
+ - iocage >= 1.8
+description:
+ - Get inventory hosts from the C(iocage) jail manager running on O(host).
+ - By default, O(host) is V(localhost). If O(host) is not V(localhost) it is expected that the user running Ansible on the
+ controller can connect to the O(host) account O(user) with SSH non-interactively and execute the command C(iocage list).
+  - Uses a configuration file as an inventory source; it must end in C(.iocage.yml) or C(.iocage.yaml).
+extends_documentation_fragment:
+ - ansible.builtin.constructed
+ - ansible.builtin.inventory_cache
+options:
+ plugin:
+ description:
+ - The name of this plugin, it should always be set to V(community.general.iocage) for this plugin to recognize it as
+ its own.
+ required: true
+ choices: ['community.general.iocage']
+ type: str
+ host:
+ description: The IP/hostname of the C(iocage) host.
+ type: str
+ default: localhost
+ user:
+ description:
+ - C(iocage) user. It is expected that the O(user) is able to connect to the O(host) with SSH and execute the command
+ C(iocage list). This option is not required if O(host=localhost).
+ type: str
+ sudo:
+ description:
+ - Enable execution as root.
+ - This requires passwordless sudo of the command C(iocage list*).
+ type: bool
+ default: false
+ version_added: 10.3.0
+ sudo_preserve_env:
+ description:
+ - Preserve environment if O(sudo) is enabled.
+ - This requires C(SETENV) sudoers tag.
+ type: bool
+ default: false
+ version_added: 10.3.0
+ get_properties:
+ description:
+ - Get jails' properties. Creates dictionary C(iocage_properties) for each added host.
+ type: bool
+ default: false
+ env:
+ description:
+ - O(user)'s environment on O(host).
+ - Enable O(sudo_preserve_env) if O(sudo) is enabled.
+ type: dict
+ default: {}
+ hooks_results:
+ description:
+ - List of paths to the files in a jail.
+ - Content of the files is stored in the items of the list C(iocage_hooks).
+      - If a file is not available, the item keeps the dash character C(-).
+ - The variable C(iocage_hooks) is not created if O(hooks_results) is empty.
+ type: list
+ elements: path
+ version_added: 10.4.0
+ inventory_hostname_tag:
+ description:
+      - The name of the tag in the C(iocage properties notes) that contains the jail's alias.
+ - By default, the C(iocage list -l) column C(NAME) is used to name the jail.
+ - This option requires the notes format C("t1=v1 t2=v2 ...").
+ - The option O(get_properties) must be enabled.
+ type: str
+ version_added: 11.0.0
+ inventory_hostname_required:
+ description:
+ - If enabled, the tag declared in O(inventory_hostname_tag) is required.
+ type: bool
+ default: false
+ version_added: 11.0.0
+notes:
+ - You might want to test the command C(ssh user@host iocage list -l) on the controller before using this inventory plugin
+ with O(user) specified and with O(host) other than V(localhost).
+ - If you run this inventory plugin on V(localhost) C(ssh) is not used. In this case, test the command C(iocage list -l).
+ - This inventory plugin creates variables C(iocage_*) for each added host.
+ - The values of these variables are collected from the output of the command C(iocage list -l).
+ - The names of these variables correspond to the output columns.
+ - The column C(NAME) is used to name the added host.
+ - The option O(hooks_results) expects the C(poolname) of a jail is mounted to C(/poolname). For example, if you activate
+    the pool C(iocage) this plugin expects to find the O(hooks_results) items in the path C(/iocage/iocage/jails/<name>/root).
+ If you mount the C(poolname) to a different path the easiest remedy is to create a symlink.
+"""
+
+EXAMPLES = r"""
+---
+# file name must end with iocage.yaml or iocage.yml
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+
+---
+# user is not required if iocage is running on localhost (default)
+plugin: community.general.iocage
+
+---
+# run cryptography without legacy algorithms
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
+
+---
+# execute as root
+# sudoers example 'admin ALL=(ALL) NOPASSWD:SETENV: /usr/local/bin/iocage list*'
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+sudo: true
+sudo_preserve_env: true
+env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
+
+---
+# enable cache
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
+cache: true
+
+---
+# see inventory plugin ansible.builtin.constructed
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
+cache: true
+strict: false
+compose:
+ ansible_host: iocage_ip4
+ release: iocage_release | split('-') | first
+groups:
+ test: inventory_hostname.startswith('test')
+keyed_groups:
+ - prefix: distro
+ key: iocage_release
+ - prefix: state
+ key: iocage_state
+
+---
+# Read the file /var/db/dhclient-hook.address.epair0b in the jails and use it as ansible_host
+plugin: community.general.iocage
+host: 10.1.0.73
+user: admin
+hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+compose:
+ ansible_host: iocage_hooks.0
+groups:
+ test: inventory_hostname.startswith('test')
+"""
+
+import re
+import os
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def _parse_ip4(ip4):
+ ''' Return dictionary iocage_ip4_dict. default = {ip4: [], msg: ''}.
+ If item matches ifc|IP or ifc|CIDR parse ifc, ip, and mask.
+ Otherwise, append item to msg.
+ '''
+
+ iocage_ip4_dict = {}
+ iocage_ip4_dict['ip4'] = []
+ iocage_ip4_dict['msg'] = ''
+
+ items = ip4.split(',')
+ for item in items:
+ if re.match('^\\w+\\|(?:\\d{1,3}\\.){3}\\d{1,3}.*$', item):
+ i = re.split('\\||/', item)
+ if len(i) == 3:
+ iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': i[2]})
+ else:
+ iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': '-'})
+ else:
+ iocage_ip4_dict['msg'] += item
+
+ return iocage_ip4_dict
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using iocage as source. '''
+
+ NAME = 'community.general.iocage'
+ IOCAGE = '/usr/local/bin/iocage'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+
+ def verify_file(self, path):
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('iocage.yaml', 'iocage.yml')):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not ending in "iocage.yaml" nor "iocage.yml"')
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ self._read_config_data(path)
+ cache_key = self.get_cache_key(path)
+
+ user_cache_setting = self.get_option('cache')
+ attempt_to_read_cache = user_cache_setting and cache
+ cache_needs_update = user_cache_setting and not cache
+
+ if attempt_to_read_cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ cache_needs_update = True
+ if not attempt_to_read_cache or cache_needs_update:
+ results = self.get_inventory(path)
+ if cache_needs_update:
+ self._cache[cache_key] = results
+
+ self.populate(results)
+
+ def get_inventory(self, path):
+ host = self.get_option('host')
+ sudo = self.get_option('sudo')
+ sudo_preserve_env = self.get_option('sudo_preserve_env')
+ env = self.get_option('env')
+ get_properties = self.get_option('get_properties')
+ hooks_results = self.get_option('hooks_results')
+ inventory_hostname_tag = self.get_option('inventory_hostname_tag')
+ inventory_hostname_required = self.get_option('inventory_hostname_required')
+
+ cmd = []
+ my_env = os.environ.copy()
+ if host == 'localhost':
+ my_env.update({str(k): str(v) for k, v in env.items()})
+ else:
+ user = self.get_option('user')
+ cmd.append("ssh")
+ cmd.append(f"{user}@{host}")
+ cmd.extend([f"{k}={v}" for k, v in env.items()])
+
+ cmd_list = cmd.copy()
+ if sudo:
+ cmd_list.append('sudo')
+ if sudo_preserve_env:
+ cmd_list.append('--preserve-env')
+ cmd_list.append(self.IOCAGE)
+ cmd_list.append('list')
+ cmd_list.append('--long')
+ try:
+ p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, env=my_env)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError(f'Failed to run cmd={cmd_list}, rc={p.returncode}, stderr={to_native(stderr)}')
+
+ try:
+ t_stdout = to_text(stdout, errors='surrogate_or_strict')
+ except UnicodeError as e:
+ raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e
+
+ except Exception as e:
+ raise AnsibleParserError(f'Failed to parse {to_native(path)}: {e}') from e
+
+ results = {'_meta': {'hostvars': {}}}
+ self.get_jails(t_stdout, results)
+
+ if get_properties:
+ for hostname, host_vars in results['_meta']['hostvars'].items():
+ cmd_get_properties = cmd.copy()
+ cmd_get_properties.append(self.IOCAGE)
+ cmd_get_properties.append("get")
+ cmd_get_properties.append("--all")
+ cmd_get_properties.append(f"{hostname}")
+ try:
+ p = Popen(cmd_get_properties, stdout=PIPE, stderr=PIPE, env=my_env)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError(
+ f'Failed to run cmd={cmd_get_properties}, rc={p.returncode}, stderr={to_native(stderr)}')
+
+ try:
+ t_stdout = to_text(stdout, errors='surrogate_or_strict')
+ except UnicodeError as e:
+ raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e
+
+ except Exception as e:
+ raise AnsibleError(f'Failed to get properties: {e}') from e
+
+ self.get_properties(t_stdout, results, hostname)
+
+ if hooks_results:
+ cmd_get_pool = cmd.copy()
+ cmd_get_pool.append(self.IOCAGE)
+ cmd_get_pool.append('get')
+ cmd_get_pool.append('--pool')
+ try:
+ p = Popen(cmd_get_pool, stdout=PIPE, stderr=PIPE, env=my_env)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError(
+ f'Failed to run cmd={cmd_get_pool}, rc={p.returncode}, stderr={to_native(stderr)}')
+ try:
+ iocage_pool = to_text(stdout, errors='surrogate_or_strict').strip()
+ except UnicodeError as e:
+ raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e
+ except Exception as e:
+ raise AnsibleError(f'Failed to get pool: {e}') from e
+
+ for hostname, host_vars in results['_meta']['hostvars'].items():
+ iocage_hooks = []
+ for hook in hooks_results:
+ path = f"/{iocage_pool}/iocage/jails/{hostname}/root{hook}"
+ cmd_cat_hook = cmd.copy()
+ cmd_cat_hook.append('cat')
+ cmd_cat_hook.append(path)
+ try:
+ p = Popen(cmd_cat_hook, stdout=PIPE, stderr=PIPE, env=my_env)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ iocage_hooks.append('-')
+ continue
+
+ try:
+ iocage_hook = to_text(stdout, errors='surrogate_or_strict').strip()
+ except UnicodeError as e:
+ raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e
+
+ except Exception:
+ iocage_hooks.append('-')
+ else:
+ iocage_hooks.append(iocage_hook)
+
+ results['_meta']['hostvars'][hostname]['iocage_hooks'] = iocage_hooks
+
+ # Optionally, get the jails names from the properties notes.
+ # Requires the notes format "t1=v1 t2=v2 ..."
+ if inventory_hostname_tag:
+ if not get_properties:
+ raise AnsibleError('Jail properties are needed to use inventory_hostname_tag. Enable get_properties')
+ update = {}
+ for hostname, host_vars in results['_meta']['hostvars'].items():
+ tags = dict(tag.split('=', 1) for tag in host_vars['iocage_properties']['notes'].split() if '=' in tag)
+ if inventory_hostname_tag in tags:
+ update[hostname] = tags[inventory_hostname_tag]
+ elif inventory_hostname_required:
+ raise AnsibleError(f'Mandatory tag {inventory_hostname_tag!r} is missing in the properties notes.')
+ for hostname, alias in update.items():
+ results['_meta']['hostvars'][alias] = results['_meta']['hostvars'].pop(hostname)
+
+ return results
+
+ def get_jails(self, t_stdout, results):
+ lines = t_stdout.splitlines()
+ if len(lines) < 5:
+ return
+ indices = [i for i, val in enumerate(lines[1]) if val == '|']
+ for line in lines[3::2]:
+ jail = [line[i + 1:j].strip() for i, j in zip(indices[:-1], indices[1:])]
+ iocage_name = jail[1]
+ iocage_ip4_dict = _parse_ip4(jail[6])
+ if iocage_ip4_dict['ip4']:
+ iocage_ip4 = ','.join([d['ip'] for d in iocage_ip4_dict['ip4']])
+ else:
+ iocage_ip4 = '-'
+ results['_meta']['hostvars'][iocage_name] = {}
+ results['_meta']['hostvars'][iocage_name]['iocage_jid'] = jail[0]
+ results['_meta']['hostvars'][iocage_name]['iocage_boot'] = jail[2]
+ results['_meta']['hostvars'][iocage_name]['iocage_state'] = jail[3]
+ results['_meta']['hostvars'][iocage_name]['iocage_type'] = jail[4]
+ results['_meta']['hostvars'][iocage_name]['iocage_release'] = jail[5]
+ results['_meta']['hostvars'][iocage_name]['iocage_ip4_dict'] = iocage_ip4_dict
+ results['_meta']['hostvars'][iocage_name]['iocage_ip4'] = iocage_ip4
+ results['_meta']['hostvars'][iocage_name]['iocage_ip6'] = jail[7]
+ results['_meta']['hostvars'][iocage_name]['iocage_template'] = jail[8]
+ results['_meta']['hostvars'][iocage_name]['iocage_basejail'] = jail[9]
+
+ def get_properties(self, t_stdout, results, hostname):
+ properties = dict(x.split(':', 1) for x in t_stdout.splitlines())
+ results['_meta']['hostvars'][hostname]['iocage_properties'] = properties
+
+ def populate(self, results):
+ strict = self.get_option('strict')
+
+ for hostname, host_vars in results['_meta']['hostvars'].items():
+ self.inventory.add_host(hostname, group='all')
+ for var, value in host_vars.items():
+ self.inventory.set_variable(hostname, var, value)
+ self._set_composite_vars(self.get_option('compose'), host_vars, hostname, strict=True)
+ self._add_host_to_composed_groups(self.get_option('groups'), host_vars, hostname, strict=strict)
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_vars, hostname, strict=strict)
diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py
index 8790da7079..fc039b03b5 100644
--- a/plugins/inventory/linode.py
+++ b/plugins/inventory/linode.py
@@ -1,92 +1,93 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = r'''
- name: linode
- author:
- - Luke Murphy (@decentral1se)
- short_description: Ansible dynamic inventory plugin for Linode.
- requirements:
- - python >= 2.7
- - linode_api4 >= 2.0.0
- description:
- - Reads inventories from the Linode API v4.
- - Uses a YAML configuration file that ends with linode.(yml|yaml).
- - Linode labels are used by default as the hostnames.
- - The default inventory groups are built from groups (deprecated by
- Linode) and not tags.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- cache:
- version_added: 4.5.0
- cache_plugin:
- version_added: 4.5.0
- cache_timeout:
- version_added: 4.5.0
- cache_connection:
- version_added: 4.5.0
- cache_prefix:
- version_added: 4.5.0
- plugin:
- description: Marks this as an instance of the 'linode' plugin.
- required: true
- choices: ['linode', 'community.general.linode']
- ip_style:
- description: Populate hostvars with all information available from the Linode APIv4.
- type: string
- default: plain
- choices:
- - plain
- - api
- version_added: 3.6.0
- access_token:
- description: The Linode account personal access token.
- required: true
- env:
- - name: LINODE_ACCESS_TOKEN
- regions:
- description: Populate inventory with instances in this region.
- default: []
- type: list
- elements: string
- tags:
- description: Populate inventory only with instances which have at least one of the tags listed here.
- default: []
- type: list
- elements: string
- version_added: 2.0.0
- types:
- description: Populate inventory with instances with this type.
- default: []
- type: list
- elements: string
- strict:
- version_added: 2.0.0
- compose:
- version_added: 2.0.0
- groups:
- version_added: 2.0.0
- keyed_groups:
- version_added: 2.0.0
-'''
+DOCUMENTATION = r"""
+name: linode
+author:
+ - Luke Murphy (@decentral1se)
+short_description: Ansible dynamic inventory plugin for Linode
+requirements:
+ - linode_api4 >= 2.0.0
+description:
+ - Reads inventories from the Linode API v4.
+ - Uses a YAML configuration file that ends with linode.(yml|yaml).
+ - Linode labels are used by default as the hostnames.
+ - The default inventory groups are built from groups (deprecated by Linode) and not tags.
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+options:
+ cache:
+ version_added: 4.5.0
+ cache_plugin:
+ version_added: 4.5.0
+ cache_timeout:
+ version_added: 4.5.0
+ cache_connection:
+ version_added: 4.5.0
+ cache_prefix:
+ version_added: 4.5.0
+ plugin:
+ description: Marks this as an instance of the 'linode' plugin.
+ type: string
+ required: true
+ choices: ['linode', 'community.general.linode']
+ ip_style:
+ description: Populate hostvars with all information available from the Linode APIv4.
+ type: string
+ default: plain
+ choices:
+ - plain
+ - api
+ version_added: 3.6.0
+ access_token:
+ description: The Linode account personal access token.
+ type: string
+ required: true
+ env:
+ - name: LINODE_ACCESS_TOKEN
+ regions:
+ description: Populate inventory with instances in this region.
+ default: []
+ type: list
+ elements: string
+ tags:
+ description: Populate inventory only with instances which have at least one of the tags listed here.
+ default: []
+ type: list
+ elements: string
+ version_added: 2.0.0
+ types:
+ description: Populate inventory with instances with this type.
+ default: []
+ type: list
+ elements: string
+ strict:
+ version_added: 2.0.0
+ compose:
+ version_added: 2.0.0
+ groups:
+ version_added: 2.0.0
+ keyed_groups:
+ version_added: 2.0.0
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
+---
# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
plugin: community.general.linode
+---
# You can use Jinja to template the access token.
plugin: community.general.linode
access_token: "{{ lookup('ini', 'token', section='your_username', file='~/.config/linode-cli') }}"
# For older Ansible versions, you need to write this as:
# access_token: "{{ lookup('ini', 'token section=your_username file=~/.config/linode-cli') }}"
+---
# Example with regions, types, groups and access token
plugin: community.general.linode
access_token: foobar
@@ -95,6 +96,7 @@ regions:
types:
- g5-standard-2
+---
# Example with keyed_groups, groups, and compose
plugin: community.general.linode
access_token: foobar
@@ -113,20 +115,19 @@ compose:
ansible_ssh_host: ipv4[0]
ansible_port: 2222
+---
# Example where control traffic limited to internal network
plugin: community.general.linode
access_token: foobar
ip_style: api
compose:
ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first"
-'''
+"""
-import os
-
-from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.module_utils.six import string_types
+from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.template import Templar
+
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
try:
@@ -145,22 +146,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
def _build_client(self, loader):
"""Build the Linode client."""
- t = Templar(loader=loader)
-
access_token = self.get_option('access_token')
- if t.is_template(access_token):
- access_token = t.template(variable=access_token, disable_lookups=False)
-
- if access_token is None:
- try:
- access_token = os.environ['LINODE_ACCESS_TOKEN']
- except KeyError:
- pass
+ if self.templar.is_template(access_token):
+ access_token = self.templar.template(variable=access_token)
if access_token is None:
raise AnsibleError((
'Could not retrieve Linode access token '
- 'from plugin configuration or environment'
+ 'from plugin configuration sources'
))
self.client = LinodeClient(access_token)
@@ -170,17 +163,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
try:
self.instances = self.client.linode.instances()
except LinodeApiError as exception:
- raise AnsibleError('Linode client raised: %s' % exception)
+ raise AnsibleError(f'Linode client raised: {exception}')
def _add_groups(self):
"""Add Linode instance groups to the dynamic inventory."""
- self.linode_groups = set(
- filter(None, [
- instance.group
- for instance
- in self.instances
- ])
- )
+ self.linode_groups = {instance.group for instance in self.instances if instance.group}
for linode_group in self.linode_groups:
self.inventory.add_group(linode_group)
@@ -211,20 +198,21 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
def _add_instances_to_groups(self):
"""Add instance names to their dynamic inventory groups."""
for instance in self.instances:
- self.inventory.add_host(instance.label, group=instance.group)
+ self.inventory.add_host(make_unsafe(instance.label), group=instance.group)
def _add_hostvars_for_instances(self):
"""Add hostvars for instances in the dynamic inventory."""
ip_style = self.get_option('ip_style')
for instance in self.instances:
hostvars = instance._raw_json
+ hostname = make_unsafe(instance.label)
for hostvar_key in hostvars:
if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']:
continue
self.inventory.set_variable(
- instance.label,
+ hostname,
hostvar_key,
- hostvars[hostvar_key]
+ make_unsafe(hostvars[hostvar_key])
)
if ip_style == 'api':
ips = instance.ips.ipv4.public + instance.ips.ipv4.private
@@ -233,9 +221,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
for ip_type in set(ip.type for ip in ips):
self.inventory.set_variable(
- instance.label,
+ hostname,
ip_type,
- self._ip_data([ip for ip in ips if ip.type == ip_type])
+ make_unsafe(self._ip_data([ip for ip in ips if ip.type == ip_type]))
)
def _ip_data(self, ip_list):
@@ -266,30 +254,44 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._add_instances_to_groups()
self._add_hostvars_for_instances()
for instance in self.instances:
- variables = self.inventory.get_host(instance.label).get_vars()
+ hostname = make_unsafe(instance.label)
+ variables = self.inventory.get_host(hostname).get_vars()
self._add_host_to_composed_groups(
self.get_option('groups'),
variables,
- instance.label,
+ hostname,
strict=strict)
self._add_host_to_keyed_groups(
self.get_option('keyed_groups'),
variables,
- instance.label,
+ hostname,
strict=strict)
self._set_composite_vars(
self.get_option('compose'),
variables,
- instance.label,
+ hostname,
strict=strict)
def verify_file(self, path):
- """Verify the Linode configuration file."""
+ """Verify the Linode configuration file.
+
+ Return true/false if the config-file is valid for this plugin
+
+ Args:
+ str(path): path to the config
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ bool(valid): is valid config file"""
+ valid = False
if super(InventoryModule, self).verify_file(path):
- endings = ('linode.yaml', 'linode.yml')
- if any((path.endswith(ending) for ending in endings)):
- return True
- return False
+ if path.endswith(("linode.yaml", "linode.yml")):
+ valid = True
+ else:
+ self.display.vvv('Inventory source not ending in "linode.yaml" or "linode.yml"')
+ return valid
def parse(self, inventory, loader, path, cache=True):
"""Dynamically parse Linode the cloud inventory."""
diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py
index 291d12b037..492d12a21b 100644
--- a/plugins/inventory/lxd.py
+++ b/plugins/inventory/lxd.py
@@ -1,102 +1,123 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021, Frank Dornheim
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = r'''
- name: lxd
- short_description: Returns Ansible inventory from lxd host
+DOCUMENTATION = r"""
+name: lxd
+short_description: Returns Ansible inventory from lxd host
+description:
+ - Get inventory from the lxd.
+ - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
+version_added: "3.0.0"
+author: "Frank Dornheim (@conloos)"
+requirements:
+ - ipaddress
+ - lxd >= 4.0
+options:
+ plugin:
+ description: Token that ensures this is a source file for the 'lxd' plugin.
+ type: string
+ required: true
+ choices: ['community.general.lxd']
+ url:
description:
- - Get inventory from the lxd.
- - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
- version_added: "3.0.0"
- author: "Frank Dornheim (@conloos)"
- requirements:
- - ipaddress
- - lxd >= 4.0
- options:
- plugin:
- description: Token that ensures this is a source file for the 'lxd' plugin.
- required: true
- choices: [ 'community.general.lxd' ]
- url:
- description:
- - The unix domain socket path or the https URL for the lxd server.
- - Sockets in filesystem have to start with C(unix:).
- - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket).
- default: unix:/var/snap/lxd/common/lxd/unix.socket
- type: str
- client_key:
- description:
- - The client certificate key file path.
- aliases: [ key_file ]
- default: $HOME/.config/lxc/client.key
- type: path
- client_cert:
- description:
- - The client certificate file path.
- aliases: [ cert_file ]
- default: $HOME/.config/lxc/client.crt
- type: path
- trust_password:
- description:
- - The client trusted password.
- - You need to set this password on the lxd server before
- running this module using the following command
- C(lxc config set core.trust_password )
- See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).
- - If I(trust_password) is set, this module send a request for authentication before sending any requests.
- type: str
- state:
- description: Filter the instance according to the current status.
- type: str
- default: none
- choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
- type_filter:
- description:
- - Filter the instances by type C(virtual-machine), C(container) or C(both).
- - The first version of the inventory only supported containers.
- type: str
- default: container
- choices: [ 'virtual-machine', 'container', 'both' ]
- version_added: 4.2.0
- prefered_instance_network_interface:
- description:
- - If an instance has multiple network interfaces, select which one is the prefered as pattern.
- - Combined with the first number that can be found e.g. 'eth' + 0.
- - The option has been renamed from I(prefered_container_network_interface) to I(prefered_instance_network_interface) in community.general 3.8.0.
- The old name still works as an alias.
- type: str
- default: eth
- aliases:
- - prefered_container_network_interface
- prefered_instance_network_family:
- description:
- - If an instance has multiple network interfaces, which one is the prefered by family.
- - Specify C(inet) for IPv4 and C(inet6) for IPv6.
- type: str
- default: inet
- choices: [ 'inet', 'inet6' ]
- groupby:
- description:
- - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid).
- - See example for syntax.
- type: dict
-'''
+ - The unix domain socket path or the https URL for the lxd server.
+ - Sockets in filesystem have to start with C(unix:).
+ - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket).
+ type: string
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ client_key:
+ description:
+ - The client certificate key file path.
+ aliases: [key_file]
+ default: $HOME/.config/lxc/client.key
+ type: path
+ client_cert:
+ description:
+ - The client certificate file path.
+ aliases: [cert_file]
+ default: $HOME/.config/lxc/client.crt
+ type: path
+ server_cert:
+ description:
+ - The server certificate file path.
+ type: path
+ version_added: 8.0.0
+ server_check_hostname:
+ description:
+ - This option controls if the server's hostname is checked as part of the HTTPS connection verification. This can be
+ useful to disable, if for example, the server certificate provided (see O(server_cert) option) does not cover a name
+ matching the one used to communicate with the server. Such mismatch is common as LXD generates self-signed server
+ certificates by default.
+ type: bool
+ default: true
+ version_added: 8.0.0
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the lxd server before running this module using the following command C(lxc config
+ set core.trust_password ) See
+ U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password).
+ - If O(trust_password) is set, this module send a request for authentication before sending any requests.
+ type: str
+ state:
+ description: Filter the instance according to the current status.
+ type: str
+ default: none
+ choices: ['STOPPED', 'STARTING', 'RUNNING', 'none']
+ project:
+ description: Filter the instance according to the given project.
+ type: str
+ default: default
+ version_added: 6.2.0
+ type_filter:
+ description:
+ - Filter the instances by type V(virtual-machine), V(container) or V(both).
+ - The first version of the inventory only supported containers.
+ type: str
+ default: container
+ choices: ['virtual-machine', 'container', 'both']
+ version_added: 4.2.0
+ prefered_instance_network_interface:
+ description:
+ - If an instance has multiple network interfaces, select which one is the preferred as pattern.
+ - Combined with the first number that can be found, for example C(eth) + C(0).
+ - The option has been renamed from O(prefered_container_network_interface) to O(prefered_instance_network_interface)
+ in community.general 3.8.0. The old name still works as an alias.
+ type: str
+ default: eth
+ aliases:
+ - prefered_container_network_interface
+ prefered_instance_network_family:
+ description:
+ - If an instance has multiple network interfaces, which one is the preferred by family.
+ - Specify V(inet) for IPv4 and V(inet6) for IPv6.
+ type: str
+ default: inet
+ choices: ['inet', 'inet6']
+ groupby:
+ description:
+ - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release),
+ C(type), C(vlanid).
+ - See example for syntax.
+ type: dict
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
+---
# simple lxd.yml
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket
+---
# simple lxd.yml including filter
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket
state: RUNNING
+---
# simple lxd.yml including virtual machines and containers
plugin: community.general.lxd
url: unix:/var/snap/lxd/common/lxd/unix.socket
@@ -140,20 +161,23 @@ groupby:
vlan666:
type: vlanid
attribute: 666
-'''
+ projectInternals:
+ type: project
+ attribute: internals
+"""
-import binascii
import json
import re
import time
import os
-import socket
+from urllib.parse import urlencode
+
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.module_utils.common.dict_transformations import dict_merge
-from ansible.module_utils.six import raise_from
from ansible.errors import AnsibleError, AnsibleParserError
from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
try:
import ipaddress
@@ -188,7 +212,7 @@ class InventoryModule(BaseInventoryPlugin):
with open(path, 'r') as json_file:
return json.load(json_file)
except (IOError, json.decoder.JSONDecodeError) as err:
- raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err)))
+ raise AnsibleParserError(f'Could not load the test data from {to_native(path)}: {err}')
def save_json_data(self, path, file_name=None):
"""save data as json
@@ -218,7 +242,7 @@ class InventoryModule(BaseInventoryPlugin):
with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file:
json.dump(self.data, json_file)
except IOError as err:
- raise AnsibleParserError('Could not save data: {0}'.format(to_native(err)))
+ raise AnsibleParserError(f'Could not save data: {err}')
def verify_file(self, path):
"""Check the config
@@ -258,7 +282,7 @@ class InventoryModule(BaseInventoryPlugin):
if not isinstance(url, str):
return False
if not url.startswith(('unix:', 'https:')):
- raise AnsibleError('URL is malformed: {0}'.format(to_native(url)))
+ raise AnsibleError(f'URL is malformed: {url}')
return True
def _connect_to_socket(self):
@@ -279,11 +303,11 @@ class InventoryModule(BaseInventoryPlugin):
urls = (url for url in url_list if self.validate_url(url))
for url in urls:
try:
- socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug)
+ socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug, self.server_cert, self.server_check_hostname)
return socket_connection
except LXDClientException as err:
error_storage[url] = err
- raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage)))
+ raise AnsibleError(f'No connection to the socket: {error_storage}')
def _get_networks(self):
"""Get Networknames
@@ -330,7 +354,15 @@ class InventoryModule(BaseInventoryPlugin):
# "status_code": 200,
# "type": "sync"
# }
- instances = self.socket.do('GET', '/1.0/instances')
+ url = '/1.0/instances'
+ if self.project:
+ url = f"{url}?{urlencode(dict(project=self.project))}"
+
+ instances = self.socket.do('GET', url)
+
+ if self.project:
+ return [m.split('/')[3].split('?')[0] for m in instances['metadata']]
+
return [m.split('/')[3] for m in instances['metadata']]
def _get_config(self, branch, name):
@@ -344,22 +376,24 @@ class InventoryModule(BaseInventoryPlugin):
Kwargs:
None
Source:
- https://github.com/lxc/lxd/blob/master/doc/rest-api.md
+ https://documentation.ubuntu.com/lxd/en/latest/rest-api/
Raises:
None
Returns:
dict(config): Config of the instance"""
config = {}
if isinstance(branch, (tuple, list)):
- config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
+ config[name] = {branch[1]: self.socket.do(
+ 'GET', f'/1.0/{to_native(branch[0])}/{to_native(name)}/{to_native(branch[1])}?{urlencode(dict(project=self.project))}')}
else:
- config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
+ config[name] = {branch: self.socket.do(
+ 'GET', f'/1.0/{to_native(branch)}/{to_native(name)}?{urlencode(dict(project=self.project))}')}
return config
def get_instance_data(self, names):
"""Create Inventory of the instance
- Iterate through the different branches of the instances and collect Informations.
+ Iterate through the different branches of the instances and collect information.
Args:
list(names): List of instance names
@@ -381,7 +415,7 @@ class InventoryModule(BaseInventoryPlugin):
def get_network_data(self, names):
"""Create Inventory of the instance
- Iterate through the different branches of the instances and collect Informations.
+ Iterate through the different branches of the instances and collect information.
Args:
list(names): List of instance names
@@ -416,7 +450,7 @@ class InventoryModule(BaseInventoryPlugin):
None
Returns:
dict(network_configuration): network config"""
- instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name))
+ instance_network_interfaces = self._get_data_entry(f'instances/{instance_name}/state/metadata/network')
network_configuration = None
if instance_network_interfaces:
network_configuration = {}
@@ -429,24 +463,24 @@ class InventoryModule(BaseInventoryPlugin):
address_set['family'] = address.get('family')
address_set['address'] = address.get('address')
address_set['netmask'] = address.get('netmask')
- address_set['combined'] = address.get('address') + '/' + address.get('netmask')
+ address_set['combined'] = f"{address.get('address')}/{address.get('netmask')}"
network_configuration[interface_name].append(address_set)
return network_configuration
def get_prefered_instance_network_interface(self, instance_name):
- """Helper to get the prefered interface of thr instance
+ """Helper to get the preferred interface of thr instance
- Helper to get the prefered interface provide by neme pattern from 'prefered_instance_network_interface'.
+ Helper to get the preferred interface provided by name pattern from 'prefered_instance_network_interface'.
Args:
- str(containe_name): name of instance
+ str(instance_name): name of instance
Kwargs:
None
Raises:
None
Returns:
str(prefered_interface): None or interface name"""
- instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
+ instance_network_interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces')
prefered_interface = None # init
if instance_network_interfaces: # instance have network interfaces
# generator if interfaces which start with the desired pattern
@@ -464,7 +498,7 @@ class InventoryModule(BaseInventoryPlugin):
Helper to get the VLAN_ID from the instance
Args:
- str(containe_name): name of instance
+ str(instance_name): name of instance
Kwargs:
None
Raises:
@@ -483,7 +517,7 @@ class InventoryModule(BaseInventoryPlugin):
# "network":"lxdbr0",
# "type":"nic"},
vlan_ids = {}
- devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name)))
+ devices = self._get_data_entry(f'instances/{to_native(instance_name)}/instances/metadata/expanded_devices')
for device in devices:
if 'network' in devices[device]:
if devices[device]['network'] in network_vlans:
@@ -546,7 +580,7 @@ class InventoryModule(BaseInventoryPlugin):
else:
path[instance_name][key] = value
except KeyError as err:
- raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
+ raise AnsibleParserError(f"Unable to store Information: {err}")
def extract_information_from_instance_configs(self):
"""Process configuration information
@@ -567,22 +601,24 @@ class InventoryModule(BaseInventoryPlugin):
for instance_name in self.data['instances']:
self._set_data_entry(instance_name, 'os', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/image.os'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/config/image.os'))
self._set_data_entry(instance_name, 'release', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/image.release'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/config/image.release'))
self._set_data_entry(instance_name, 'version', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/image.version'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/config/image.version'))
self._set_data_entry(instance_name, 'profile', self._get_data_entry(
- 'instances/{0}/instances/metadata/profiles'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/profiles'))
self._set_data_entry(instance_name, 'location', self._get_data_entry(
- 'instances/{0}/instances/metadata/location'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/location'))
self._set_data_entry(instance_name, 'state', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/config/volatile.last_state.power'))
self._set_data_entry(instance_name, 'type', self._get_data_entry(
- 'instances/{0}/instances/metadata/type'.format(instance_name)))
+ f'instances/{instance_name}/instances/metadata/type'))
self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name))
self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name))
self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name))
+ self._set_data_entry(instance_name, 'project', self._get_data_entry(
+ f'instances/{instance_name}/instances/metadata/project'))
def build_inventory_network(self, instance_name):
"""Add the network interfaces of the instance to the inventory
@@ -616,18 +652,18 @@ class InventoryModule(BaseInventoryPlugin):
None
Returns:
dict(interface_name: ip)"""
- prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None
+ prefered_interface = self._get_data_entry(f'inventory/{instance_name}/preferred_interface') # name or None
prefered_instance_network_family = self.prefered_instance_network_family
ip_address = ''
if prefered_interface:
- interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface))
+ interface = self._get_data_entry(f'inventory/{instance_name}/network_interfaces/{prefered_interface}')
for config in interface:
if config['family'] == prefered_instance_network_family:
ip_address = config['address']
break
else:
- interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
+ interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces')
for interface in interfaces.values():
for config in interface:
if config['family'] == prefered_instance_network_family:
@@ -635,9 +671,9 @@ class InventoryModule(BaseInventoryPlugin):
break
return ip_address
- if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance have network interfaces
+ if self._get_data_entry(f'inventory/{instance_name}/network_interfaces'): # instance has network interfaces
self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh')
- self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name))
+ self.inventory.set_variable(instance_name, 'ansible_host', make_unsafe(interface_selection(instance_name)))
else:
self.inventory.set_variable(instance_name, 'ansible_connection', 'local')
@@ -656,36 +692,46 @@ class InventoryModule(BaseInventoryPlugin):
Returns:
None"""
for instance_name in self.data['inventory']:
- instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or "STOPPED").lower()
+ instance_state = str(self._get_data_entry(f'inventory/{instance_name}/state') or "STOPPED").lower()
# Only consider instances that match the "state" filter, if self.state is not None
if self.filter:
if self.filter.lower() != instance_state:
continue
# add instance
+ instance_name = make_unsafe(instance_name)
self.inventory.add_host(instance_name)
- # add network informations
+ # add network information
self.build_inventory_network(instance_name)
# add os
- v = self._get_data_entry('inventory/{0}/os'.format(instance_name))
+ v = self._get_data_entry(f'inventory/{instance_name}/os')
if v:
- self.inventory.set_variable(instance_name, 'ansible_lxd_os', v.lower())
+ self.inventory.set_variable(instance_name, 'ansible_lxd_os', make_unsafe(v.lower()))
# add release
- v = self._get_data_entry('inventory/{0}/release'.format(instance_name))
+ v = self._get_data_entry(f'inventory/{instance_name}/release')
if v:
- self.inventory.set_variable(instance_name, 'ansible_lxd_release', v.lower())
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_release', make_unsafe(v.lower()))
# add profile
- self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name)))
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_profile', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/profile')))
# add state
- self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state)
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_state', make_unsafe(instance_state))
# add type
- self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name)))
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_type', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/type')))
# add location information
- if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None'
- self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name)))
+ if self._get_data_entry(f'inventory/{instance_name}/location') != "none": # wrong type by lxd 'none' != 'None'
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_location', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/location')))
# add VLAN_ID information
- if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)):
- self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)))
+ if self._get_data_entry(f'inventory/{instance_name}/vlan_ids'):
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_vlan_ids', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/vlan_ids')))
+ # add project
+ self.inventory.set_variable(
+ instance_name, 'ansible_lxd_project', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/project')))
def build_inventory_groups_location(self, group_name):
"""create group by attribute: location
@@ -747,7 +793,7 @@ class InventoryModule(BaseInventoryPlugin):
network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute')))
except ValueError as err:
raise AnsibleParserError(
- 'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
+ f"Error while parsing network range {self.groupby[group_name].get('attribute')}: {err}")
for instance_name in self.inventory.hosts:
if self.data['inventory'][instance_name].get('network_interfaces') is not None:
@@ -761,6 +807,28 @@ class InventoryModule(BaseInventoryPlugin):
# Ignore invalid IP addresses returned by lxd
pass
+ def build_inventory_groups_project(self, group_name):
+ """create group by attribute: project
+
+ Args:
+ str(group_name): Group name
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # maybe we just want to expand one group
+ if group_name not in self.inventory.groups:
+ self.inventory.add_group(group_name)
+
+ gen_instances = [
+ instance_name for instance_name in self.inventory.hosts
+ if 'ansible_lxd_project' in self.inventory.get_host(instance_name).get_vars()]
+ for instance_name in gen_instances:
+ if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_project'):
+ self.inventory.add_child(group_name, instance_name)
+
def build_inventory_groups_os(self, group_name):
"""create group by attribute: os
@@ -899,6 +967,7 @@ class InventoryModule(BaseInventoryPlugin):
* 'profile'
* 'vlanid'
* 'type'
+ * 'project'
Args:
str(group_name): Group name
@@ -926,14 +995,16 @@ class InventoryModule(BaseInventoryPlugin):
self.build_inventory_groups_vlanid(group_name)
elif self.groupby[group_name].get('type') == 'type':
self.build_inventory_groups_type(group_name)
+ elif self.groupby[group_name].get('type') == 'project':
+ self.build_inventory_groups_project(group_name)
else:
- raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))
+ raise AnsibleParserError(f'Unknown group type: {to_native(group_name)}')
if self.groupby:
for group_name in self.groupby:
if not group_name.isalnum():
- raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name)))
- group_type(group_name)
+ raise AnsibleParserError(f'Invalid character(s) in groupname: {to_native(group_name)}')
+ group_type(make_unsafe(group_name))
def build_inventory(self):
"""Build dynamic inventory
@@ -969,7 +1040,7 @@ class InventoryModule(BaseInventoryPlugin):
None"""
iter_keys = list(self.data['instances'].keys())
for instance_name in iter_keys:
- if self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter:
+ if self._get_data_entry(f'instances/{instance_name}/instances/metadata/type') != self.type_filter:
del self.data['instances'][instance_name]
def _populate(self):
@@ -1022,9 +1093,7 @@ class InventoryModule(BaseInventoryPlugin):
Returns:
None"""
if IPADDRESS_IMPORT_ERROR:
- raise_from(
- AnsibleError('another_library must be installed to use this plugin'),
- IPADDRESS_IMPORT_ERROR)
+ raise AnsibleError('another_library must be installed to use this plugin') from IPADDRESS_IMPORT_ERROR
super(InventoryModule, self).parse(inventory, loader, path, cache=False)
# Read the inventory YAML file
@@ -1032,6 +1101,9 @@ class InventoryModule(BaseInventoryPlugin):
try:
self.client_key = self.get_option('client_key')
self.client_cert = self.get_option('client_cert')
+ self.server_cert = self.get_option('server_cert')
+ self.server_check_hostname = self.get_option('server_check_hostname')
+ self.project = self.get_option('project')
self.debug = self.DEBUG
self.data = {} # store for inventory-data
self.groupby = self.get_option('groupby')
@@ -1047,6 +1119,6 @@ class InventoryModule(BaseInventoryPlugin):
self.url = self.get_option('url')
except Exception as err:
raise AnsibleParserError(
- 'All correct options required: {0}'.format(to_native(err)))
+ f'All correct options required: {err}')
# Call our internal helper to populate the dynamic inventory
self._populate()
diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py
index 3c3b3f8e41..ea0ce560fd 100644
--- a/plugins/inventory/nmap.py
+++ b/plugins/inventory/nmap.py
@@ -1,68 +1,127 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: nmap
- short_description: Uses nmap to find hosts to target
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: nmap
+short_description: Uses nmap to find hosts to target
+description:
+ - Uses a YAML configuration file with a valid YAML extension.
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+requirements:
+ - nmap CLI installed
+options:
+ plugin:
+ description: Token that ensures this is a source file for the P(community.general.nmap#inventory) plugin.
+ type: string
+ required: true
+ choices: ['nmap', 'community.general.nmap']
+ sudo:
+ description: Set to V(true) to execute a C(sudo nmap) plugin scan.
+ version_added: 4.8.0
+ default: false
+ type: boolean
+ address:
+ description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
+ type: string
+ required: true
+ env:
+ - name: ANSIBLE_NMAP_ADDRESS
+ version_added: 6.6.0
+ exclude:
description:
- - Uses a YAML configuration file with a valid YAML extension.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- requirements:
- - nmap CLI installed
- options:
- plugin:
- description: token that ensures this is a source file for the 'nmap' plugin.
- required: True
- choices: ['nmap', 'community.general.nmap']
- sudo:
- description: Set to C(true) to execute a C(sudo nmap) plugin scan.
- version_added: 4.8.0
- default: false
- type: boolean
- address:
- description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
- required: True
- exclude:
- description: list of addresses to exclude
- type: list
- elements: string
- ports:
- description: Enable/disable scanning for open ports
- type: boolean
- default: True
- ipv4:
- description: use IPv4 type addresses
- type: boolean
- default: True
- ipv6:
- description: use IPv6 type addresses
- type: boolean
- default: True
- notes:
- - At least one of ipv4 or ipv6 is required to be True, both can be True, but they cannot both be False.
- - 'TODO: add OS fingerprinting'
-'''
-EXAMPLES = '''
+ - List of addresses to exclude.
+ - For example V(10.2.2.15-25) or V(10.2.2.15,10.2.2.16).
+ type: list
+ elements: string
+ env:
+ - name: ANSIBLE_NMAP_EXCLUDE
+ version_added: 6.6.0
+ port:
+ description:
+ - Only scan specific port or port range (C(-p)).
+ - For example, you could pass V(22) for a single port, V(1-65535) for a range of ports, or V(U:53,137,T:21-25,139,8080,S:9)
+ to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all.
+ type: string
+ version_added: 6.5.0
+ ports:
+ description: Enable/disable scanning ports.
+ type: boolean
+ default: true
+ ipv4:
+ description: Use IPv4 type addresses.
+ type: boolean
+ default: true
+ ipv6:
+ description: Use IPv6 type addresses.
+ type: boolean
+ default: true
+ udp_scan:
+ description:
+ - Scan using UDP.
+ - Depending on your system you might need O(sudo=true) for this to work.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ icmp_timestamp:
+ description:
+ - Scan using ICMP Timestamp (C(-PP)).
+ - Depending on your system you might need O(sudo=true) for this to work.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ open:
+ description: Only scan for open (or possibly open) ports.
+ type: boolean
+ default: false
+ version_added: 6.5.0
+ dns_resolve:
+ description: Whether to always (V(true)) or never (V(false)) do DNS resolution.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ dns_servers:
+ description: Specify which DNS servers to use for name resolution.
+ type: list
+ elements: string
+ version_added: 10.5.0
+ use_arp_ping:
+ description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method.
+ type: boolean
+ default: true
+ version_added: 7.4.0
+notes:
+ - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false).
+ - 'TODO: add OS fingerprinting.'
+"""
+EXAMPLES = r"""
+---
# inventory.config file in YAML format
plugin: community.general.nmap
-strict: False
+strict: false
address: 192.168.0.0/24
-
+---
# a sudo nmap scan to fully use nmap scan power.
plugin: community.general.nmap
sudo: true
-strict: False
+strict: false
address: 192.168.0.0/24
-'''
+
+---
+# an nmap scan specifying ports and classifying results to an inventory group
+plugin: community.general.nmap
+address: 192.168.0.0/24
+exclude: 192.168.0.1, web.example.com
+port: 22, 443
+groups:
+ web_servers: "ports | selectattr('port', 'equalto', '443')"
+"""
import os
import re
@@ -75,6 +134,8 @@ from ansible.module_utils.common.text.converters import to_native, to_text
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.common.process import get_bin_path
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
+
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
@@ -91,6 +152,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
strict = self.get_option('strict')
for host in hosts:
+ host = make_unsafe(host)
hostname = host['name']
self.inventory.add_host(hostname)
for var, value in host.items():
@@ -121,7 +183,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
try:
self._nmap = get_bin_path('nmap')
except ValueError as e:
- raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e)))
+ raise AnsibleParserError(f'nmap inventory plugin requires the nmap cli tool to work: {e}')
super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
@@ -149,30 +211,53 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
# setup command
cmd = [self._nmap]
- if self._options['sudo']:
+ if self.get_option('sudo'):
cmd.insert(0, 'sudo')
- if not self._options['ports']:
+ if self.get_option('port'):
+ cmd.append('-p')
+ cmd.append(self.get_option('port'))
+
+ if not self.get_option('ports'):
cmd.append('-sP')
- if self._options['ipv4'] and not self._options['ipv6']:
+ if self.get_option('ipv4') and not self.get_option('ipv6'):
cmd.append('-4')
- elif self._options['ipv6'] and not self._options['ipv4']:
+ elif self.get_option('ipv6') and not self.get_option('ipv4'):
cmd.append('-6')
- elif not self._options['ipv6'] and not self._options['ipv4']:
+ elif not self.get_option('ipv6') and not self.get_option('ipv4'):
raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
- if self._options['exclude']:
+ if self.get_option('exclude'):
cmd.append('--exclude')
- cmd.append(','.join(self._options['exclude']))
+ cmd.append(','.join(self.get_option('exclude')))
- cmd.append(self._options['address'])
+ if self.get_option('dns_resolve'):
+ cmd.append('-n')
+
+ if self.get_option('dns_servers'):
+ cmd.append('--dns-servers')
+ cmd.append(','.join(self.get_option('dns_servers')))
+
+ if self.get_option('udp_scan'):
+ cmd.append('-sU')
+
+ if self.get_option('icmp_timestamp'):
+ cmd.append('-PP')
+
+ if self.get_option('open'):
+ cmd.append('--open')
+
+ if not self.get_option('use_arp_ping'):
+ cmd.append('--disable-arp-ping')
+
+ cmd.append(self.get_option('address'))
try:
# execute
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
- raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
+ raise AnsibleParserError(f'Failed to run nmap, rc={p.returncode}: {to_native(stderr)}')
# parse results
host = None
@@ -183,7 +268,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
try:
t_stdout = to_text(stdout, errors='surrogate_or_strict')
except UnicodeError as e:
- raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
+ raise AnsibleParserError(f'Invalid (non unicode) input returned: {e}')
for line in t_stdout.splitlines():
hits = self.find_host.match(line)
@@ -224,7 +309,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
results[-1]['ports'] = ports
except Exception as e:
- raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
+ raise AnsibleParserError(f"failed to parse {to_native(path)}: {e} ")
if cache_needs_update:
self._cache[cache_key] = results
diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py
index f0424ea5e8..cbc46a6723 100644
--- a/plugins/inventory/online.py
+++ b/plugins/inventory/online.py
@@ -1,52 +1,52 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = r'''
- name: online
- author:
- - Remy Leone (@remyleone)
- short_description: Scaleway (previously Online SAS or Online.net) inventory source
- description:
- - Get inventory hosts from Scaleway (previously Online SAS or Online.net).
- options:
- plugin:
- description: token that ensures this is a source file for the 'online' plugin.
- required: True
- choices: ['online', 'community.general.online']
- oauth_token:
- required: True
- description: Online OAuth token.
- env:
- # in order of precedence
- - name: ONLINE_TOKEN
- - name: ONLINE_API_KEY
- - name: ONLINE_OAUTH_TOKEN
- hostnames:
- description: List of preference about what to use as an hostname.
- type: list
- elements: string
- default:
- - public_ipv4
- choices:
- - public_ipv4
- - private_ipv4
- - hostname
- groups:
- description: List of groups.
- type: list
- elements: string
- choices:
- - location
- - offer
- - rpn
-'''
+DOCUMENTATION = r"""
+name: online
+author:
+ - Remy Leone (@remyleone)
+short_description: Scaleway (previously Online SAS or Online.net) inventory source
+description:
+ - Get inventory hosts from Scaleway (previously Online SAS or Online.net).
+options:
+ plugin:
+ description: Token that ensures this is a source file for the P(community.general.online#inventory) plugin.
+ type: string
+ required: true
+ choices: ['online', 'community.general.online']
+ oauth_token:
+ required: true
+ description: Online OAuth token.
+ type: string
+ env:
+ # in order of precedence
+ - name: ONLINE_TOKEN
+ - name: ONLINE_API_KEY
+ - name: ONLINE_OAUTH_TOKEN
+ hostnames:
+ description: List of preference about what to use as an hostname.
+ type: list
+ elements: string
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - hostname
+ groups:
+ description: List of groups.
+ type: list
+ elements: string
+ choices:
+ - location
+ - offer
+ - rpn
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# online_inventory.yml file in YAML format
# Example command line: ansible-inventory --list -i online_inventory.yml
@@ -57,17 +57,19 @@ groups:
- location
- offer
- rpn
-'''
+"""
import json
from sys import version as python_version
+from urllib.parse import urljoin
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.plugins.inventory import BaseInventoryPlugin
-from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.ansible_release import __version__ as ansible_version
-from ansible.module_utils.six.moves.urllib.parse import urljoin
+
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
class InventoryModule(BaseInventoryPlugin):
@@ -134,7 +136,7 @@ class InventoryModule(BaseInventoryPlugin):
try:
response = open_url(url, headers=self.headers)
except Exception as e:
- self.display.warning("An error happened while fetching: %s" % url)
+ self.display.warning(f"An error happened while fetching: {url}")
return None
try:
@@ -169,20 +171,20 @@ class InventoryModule(BaseInventoryPlugin):
"support"
)
for attribute in targeted_attributes:
- self.inventory.set_variable(hostname, attribute, host_infos[attribute])
+ self.inventory.set_variable(hostname, attribute, make_unsafe(host_infos[attribute]))
if self.extract_public_ipv4(host_infos=host_infos):
- self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos))
- self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos))
+ self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_public_ipv4(host_infos=host_infos)))
+ self.inventory.set_variable(hostname, "ansible_host", make_unsafe(self.extract_public_ipv4(host_infos=host_infos)))
if self.extract_private_ipv4(host_infos=host_infos):
- self.inventory.set_variable(hostname, "public_ipv4", self.extract_private_ipv4(host_infos=host_infos))
+ self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_private_ipv4(host_infos=host_infos)))
if self.extract_os_name(host_infos=host_infos):
- self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos))
+ self.inventory.set_variable(hostname, "os_name", make_unsafe(self.extract_os_name(host_infos=host_infos)))
if self.extract_os_version(host_infos=host_infos):
- self.inventory.set_variable(hostname, "os_version", self.extract_os_name(host_infos=host_infos))
+ self.inventory.set_variable(hostname, "os_version", make_unsafe(self.extract_os_name(host_infos=host_infos)))
def _filter_host(self, host_infos, hostname_preferences):
@@ -201,6 +203,8 @@ class InventoryModule(BaseInventoryPlugin):
if not hostname:
return
+ hostname = make_unsafe(hostname)
+
self.inventory.add_host(host=hostname)
self._fill_host_variables(hostname=hostname, host_infos=host_infos)
@@ -210,6 +214,8 @@ class InventoryModule(BaseInventoryPlugin):
if not group:
return
+ group = make_unsafe(group)
+
self.inventory.add_group(group=group)
self.inventory.add_host(group=group, host=hostname)
@@ -237,8 +243,8 @@ class InventoryModule(BaseInventoryPlugin):
}
self.headers = {
- 'Authorization': "Bearer %s" % token,
- 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]),
+ 'Authorization': f"Bearer {token}",
+ 'User-Agent': f"ansible {ansible_version} Python {python_version.split(' ', 1)[0]}",
'Content-type': 'application/json'
}
diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py
index f46ad73c57..26f7a21d88 100644
--- a/plugins/inventory/opennebula.py
+++ b/plugins/inventory/opennebula.py
@@ -1,83 +1,79 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
-__metaclass__ = type
-DOCUMENTATION = r'''
- name: opennebula
- author:
- - Kristian Feldsam (@feldsam)
- short_description: OpenNebula inventory source
- version_added: "3.8.0"
- extends_documentation_fragment:
- - constructed
+DOCUMENTATION = r"""
+name: opennebula
+author:
+ - Kristian Feldsam (@feldsam)
+short_description: OpenNebula inventory source
+version_added: "3.8.0"
+extends_documentation_fragment:
+ - constructed
+description:
+ - Get inventory hosts from OpenNebula cloud.
+ - Uses a YAML configuration file ending with either C(opennebula.yml) or C(opennebula.yaml) to set parameter values.
+ - Uses O(api_authfile), C(~/.one/one_auth), or E(ONE_AUTH) pointing to a OpenNebula credentials file.
+options:
+ plugin:
+ description: Token that ensures this is a source file for the 'opennebula' plugin.
+ type: string
+ required: true
+ choices: [community.general.opennebula]
+ api_url:
description:
- - Get inventory hosts from OpenNebula cloud.
- - Uses an YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)
- to set parameter values.
- - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to a OpenNebula credentials file.
- options:
- plugin:
- description: Token that ensures this is a source file for the 'opennebula' plugin.
- type: string
- required: true
- choices: [ community.general.opennebula ]
- api_url:
- description:
- - URL of the OpenNebula RPC server.
- - It is recommended to use HTTPS so that the username/password are not
- transferred over the network unencrypted.
- - If not set then the value of the C(ONE_URL) environment variable is used.
- env:
- - name: ONE_URL
- required: True
- type: string
- api_username:
- description:
- - Name of the user to login into the OpenNebula RPC server. If not set
- then the value of the C(ONE_USERNAME) environment variable is used.
- env:
- - name: ONE_USERNAME
- type: string
- api_password:
- description:
- - Password or a token of the user to login into OpenNebula RPC server.
- - If not set, the value of the C(ONE_PASSWORD) environment variable is used.
- env:
- - name: ONE_PASSWORD
- required: False
- type: string
- api_authfile:
- description:
- - If both I(api_username) or I(api_password) are not set, then it will try
- authenticate with ONE auth file. Default path is C(~/.one/one_auth).
- - Set environment variable C(ONE_AUTH) to override this path.
- env:
- - name: ONE_AUTH
- required: False
- type: string
- hostname:
- description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on VM.
- type: string
- default: v4_first_ip
- choices:
- - v4_first_ip
- - v6_first_ip
- - name
- filter_by_label:
- description: Only return servers filtered by this label.
- type: string
- group_by_labels:
- description: Create host groups by vm labels
- type: bool
- default: True
-'''
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+ - If not set then the value of the E(ONE_URL) environment variable is used.
+ env:
+ - name: ONE_URL
+ required: true
+ type: string
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula RPC server. If not set then the value of the E(ONE_USERNAME) environment
+ variable is used.
+ env:
+ - name: ONE_USERNAME
+ type: string
+ api_password:
+ description:
+ - Password or a token of the user to login into OpenNebula RPC server.
+ - If not set, the value of the E(ONE_PASSWORD) environment variable is used.
+ env:
+ - name: ONE_PASSWORD
+ required: false
+ type: string
+ api_authfile:
+ description:
+ - If neither O(api_username) nor O(api_password) is set, then it tries to authenticate with the ONE auth file. Default
+ path is C(~/.one/one_auth).
+ - Set environment variable E(ONE_AUTH) to override this path.
+ env:
+ - name: ONE_AUTH
+ required: false
+ type: string
+ hostname:
+ description: Field to match the hostname. Note V(v4_first_ip) corresponds to the first IPv4 found on the VM.
+ type: string
+ default: v4_first_ip
+ choices:
+ - v4_first_ip
+ - v6_first_ip
+ - name
+ filter_by_label:
+ description: Only return servers filtered by this label.
+ type: string
+ group_by_labels:
+ description: Create host groups by VM labels.
+ type: bool
+ default: true
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# inventory_opennebula.yml file in YAML format
# Example command line: ansible-inventory --list -i inventory_opennebula.yml
@@ -85,7 +81,7 @@ EXAMPLES = r'''
plugin: community.general.opennebula
api_url: https://opennebula:2633/RPC2
filter_by_label: Cache
-'''
+"""
try:
import pyone
@@ -96,7 +92,8 @@ except ImportError:
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
-from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
from collections import namedtuple
import os
@@ -126,9 +123,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
authstring = fp.read().rstrip()
username, password = authstring.split(":")
except (OSError, IOError):
- raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile))
+ raise AnsibleError(f"Could not find or read ONE_AUTH file at '{authfile}'")
except Exception:
- raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile))
+ raise AnsibleError(f"Error occurs when reading ONE_AUTH file at '{authfile}'")
auth_params = namedtuple('auth', ('url', 'username', 'password'))
@@ -141,7 +138,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
nic = [nic]
for net in nic:
- return net['IP']
+ if net.get('IP'):
+ return net['IP']
return False
@@ -163,13 +161,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
if not (auth.username and auth.password):
raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.')
else:
- one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+ one_client = pyone.OneServer(auth.url, session=f"{auth.username}:{auth.password}")
# get hosts (VMs)
try:
vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)
except Exception as e:
- raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e)))
+ raise AnsibleError(f"Something happened during XML-RPC call: {e}")
return vm_pool
@@ -196,6 +194,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
continue
server['name'] = vm.NAME
+ server['id'] = vm.ID
+ if hasattr(vm.HISTORY_RECORDS, 'HISTORY') and vm.HISTORY_RECORDS.HISTORY:
+ server['host'] = vm.HISTORY_RECORDS.HISTORY[-1].HOSTNAME
server['LABELS'] = labels
server['v4_first_ip'] = self._get_vm_ipv4(vm)
server['v6_first_ip'] = self._get_vm_ipv6(vm)
@@ -215,6 +216,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
filter_by_label = self.get_option('filter_by_label')
servers = self._retrieve_servers(filter_by_label)
for server in servers:
+ server = make_unsafe(server)
hostname = server['name']
# check for labels
if group_by_labels and server['LABELS']:
diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py
deleted file mode 100644
index e13a08a55c..0000000000
--- a/plugins/inventory/proxmox.py
+++ /dev/null
@@ -1,666 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2016 Guido Günther , Daniel Lobato Garcia
-# Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: proxmox
- short_description: Proxmox inventory source
- version_added: "1.2.0"
- author:
- - Jeffrey van Pelt (@Thulium-Drake)
- requirements:
- - requests >= 1.1
- description:
- - Get inventory hosts from a Proxmox PVE cluster.
- - "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)"
- - Will retrieve the first network interface with an IP for Proxmox nodes.
- - Can retrieve LXC/QEMU configuration as facts.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- plugin:
- description: The name of this plugin, it should always be set to C(community.general.proxmox) for this plugin to recognize it as it's own.
- required: yes
- choices: ['community.general.proxmox']
- type: str
- url:
- description:
- - URL to Proxmox cluster.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_URL) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the I(url).
- default: 'http://localhost:8006'
- type: str
- env:
- - name: PROXMOX_URL
- version_added: 2.0.0
- user:
- description:
- - Proxmox authentication user.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_USER) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the I(user).
- required: yes
- type: str
- env:
- - name: PROXMOX_USER
- version_added: 2.0.0
- password:
- description:
- - Proxmox authentication password.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_PASSWORD) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the I(password).
- - If you do not specify a password, you must set I(token_id) and I(token_secret) instead.
- type: str
- env:
- - name: PROXMOX_PASSWORD
- version_added: 2.0.0
- token_id:
- description:
- - Proxmox authentication token ID.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_ID) will be used instead.
- - To use token authentication, you must also specify I(token_secret). If you do not specify I(token_id) and I(token_secret),
- you must set a password instead.
- - Make sure to grant explicit pve permissions to the token or disable 'privilege separation' to use the users' privileges instead.
- version_added: 4.8.0
- type: str
- env:
- - name: PROXMOX_TOKEN_ID
- token_secret:
- description:
- - Proxmox authentication token secret.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_SECRET) will be used instead.
- - To use token authentication, you must also specify I(token_id). If you do not specify I(token_id) and I(token_secret),
- you must set a password instead.
- version_added: 4.8.0
- type: str
- env:
- - name: PROXMOX_TOKEN_SECRET
- validate_certs:
- description: Verify SSL certificate if using HTTPS.
- type: boolean
- default: yes
- group_prefix:
- description: Prefix to apply to Proxmox groups.
- default: proxmox_
- type: str
- facts_prefix:
- description: Prefix to apply to LXC/QEMU config facts.
- default: proxmox_
- type: str
- want_facts:
- description:
- - Gather LXC/QEMU configuration facts.
- - When I(want_facts) is set to C(true) more details about QEMU VM status are possible, besides the running and stopped states.
- Currently if the VM is running and it is suspended, the status will be running and the machine will be in C(running) group,
- but its actual state will be paused. See I(qemu_extended_statuses) for how to retrieve the real status.
- default: no
- type: bool
- qemu_extended_statuses:
- description:
- - Requires I(want_facts) to be set to C(true) to function. This will allow you to differentiate betweend C(paused) and C(prelaunch)
- statuses of the QEMU VMs.
- - This introduces multiple groups [prefixed with I(group_prefix)] C(prelaunch) and C(paused).
- default: no
- type: bool
- version_added: 5.1.0
- want_proxmox_nodes_ansible_host:
- version_added: 3.0.0
- description:
- - Whether to set C(ansbile_host) for proxmox nodes.
- - When set to C(true) (default), will use the first available interface. This can be different from what you expect.
- - This currently defaults to C(true), but the default is deprecated since community.general 4.8.0.
- The default will change to C(false) in community.general 6.0.0. To avoid a deprecation warning, please
- set this parameter explicitly.
- type: bool
- filters:
- version_added: 4.6.0
- description: A list of Jinja templates that allow filtering hosts.
- type: list
- elements: str
- default: []
- strict:
- version_added: 2.5.0
- compose:
- version_added: 2.5.0
- groups:
- version_added: 2.5.0
- keyed_groups:
- version_added: 2.5.0
-'''
-
-EXAMPLES = '''
-# Minimal example which will not gather additional facts for QEMU/LXC guests
-# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006
-# my.proxmox.yml
-plugin: community.general.proxmox
-user: ansible@pve
-password: secure
-# Note that this can easily give you wrong values as ansible_host. See further below for
-# an example where this is set to `false` and where ansible_host is set with `compose`.
-want_proxmox_nodes_ansible_host: true
-
-# Instead of login with password, proxmox supports api token authentication since release 6.2.
-plugin: community.general.proxmox
-user: ci@pve
-token_id: gitlab-1
-token_secret: fa256e9c-26ab-41ec-82da-707a2c079829
-
-# The secret can also be a vault string or passed via the environment variable TOKEN_SECRET.
-token_secret: !vault |
- $ANSIBLE_VAULT;1.1;AES256
- 62353634333163633336343265623632626339313032653563653165313262343931643431656138
- 6134333736323265656466646539663134306166666237630a653363623262636663333762316136
- 34616361326263383766366663393837626437316462313332663736623066656237386531663731
- 3037646432383064630a663165303564623338666131353366373630656661333437393937343331
- 32643131386134396336623736393634373936356332623632306561356361323737313663633633
- 6231313333666361656537343562333337323030623732323833
-
-# More complete example demonstrating the use of 'want_facts' and the constructed options
-# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true'
-# my.proxmox.yml
-plugin: community.general.proxmox
-url: http://pve.domain.com:8006
-user: ansible@pve
-password: secure
-validate_certs: false
-want_facts: true
-keyed_groups:
- # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true'
- - key: proxmox_tags_parsed
- separator: ""
- prefix: group
-groups:
- webservers: "'web' in (proxmox_tags_parsed|list)"
- mailservers: "'mail' in (proxmox_tags_parsed|list)"
-compose:
- ansible_port: 2222
-# Note that this can easily give you wrong values as ansible_host. See further below for
-# an example where this is set to `false` and where ansible_host is set with `compose`.
-want_proxmox_nodes_ansible_host: true
-
-# Using the inventory to allow ansible to connect via the first IP address of the VM / Container
-# (Default is connection by name of QEMU/LXC guests)
-# Note: my_inv_var demonstrates how to add a string variable to every host used by the inventory.
-# my.proxmox.yml
-plugin: community.general.proxmox
-url: http://pve.domain.com:8006
-user: ansible@pve
-password: secure
-validate_certs: false
-want_facts: true
-want_proxmox_nodes_ansible_host: false
-compose:
- ansible_host: proxmox_ipconfig0.ip | default(proxmox_net0.ip) | ipaddr('address')
- my_inv_var_1: "'my_var1_value'"
- my_inv_var_2: >
- "my_var_2_value"
-
-# Specify the url, user and password using templating
-# my.proxmox.yml
-plugin: community.general.proxmox
-url: "{{ lookup('ansible.builtin.ini', 'url', section='proxmox', file='file.ini') }}"
-user: "{{ lookup('ansible.builtin.env','PM_USER') | default('ansible@pve') }}"
-password: "{{ lookup('community.general.random_string', base64=True) }}"
-# Note that this can easily give you wrong values as ansible_host. See further up for
-# an example where this is set to `false` and where ansible_host is set with `compose`.
-want_proxmox_nodes_ansible_host: true
-
-'''
-
-import itertools
-import re
-
-from ansible.module_utils.common._collections_compat import MutableMapping
-
-from ansible.errors import AnsibleError
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.six import string_types
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.utils.display import Display
-from ansible.template import Templar
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-# 3rd party imports
-try:
- import requests
- if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
- raise ImportError
- HAS_REQUESTS = True
-except ImportError:
- HAS_REQUESTS = False
-
-display = Display()
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
- ''' Host inventory parser for ansible using Proxmox as source. '''
-
- NAME = 'community.general.proxmox'
-
- def __init__(self):
-
- super(InventoryModule, self).__init__()
-
- # from config
- self.proxmox_url = None
-
- self.session = None
- self.cache_key = None
- self.use_cache = None
-
- def verify_file(self, path):
-
- valid = False
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('proxmox.yaml', 'proxmox.yml')):
- valid = True
- else:
- self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" nor "proxmox.yml"')
- return valid
-
- def _get_session(self):
- if not self.session:
- self.session = requests.session()
- self.session.verify = self.get_option('validate_certs')
- return self.session
-
- def _get_auth(self):
- credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, })
-
- if self.proxmox_password:
-
- credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, })
-
- a = self._get_session()
- ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials)
-
- json = ret.json()
-
- self.headers = {
- # only required for POST/PUT/DELETE methods, which we are not using currently
- # 'CSRFPreventionToken': json['data']['CSRFPreventionToken'],
- 'Cookie': 'PVEAuthCookie={0}'.format(json['data']['ticket'])
- }
-
- else:
-
- self.headers = {'Authorization': 'PVEAPIToken={0}!{1}={2}'.format(self.proxmox_user, self.proxmox_token_id, self.proxmox_token_secret)}
-
- def _get_json(self, url, ignore_errors=None):
-
- if not self.use_cache or url not in self._cache.get(self.cache_key, {}):
-
- if self.cache_key not in self._cache:
- self._cache[self.cache_key] = {'url': ''}
-
- data = []
- s = self._get_session()
- while True:
- ret = s.get(url, headers=self.headers)
- if ignore_errors and ret.status_code in ignore_errors:
- break
- ret.raise_for_status()
- json = ret.json()
-
- # process results
- # FIXME: This assumes 'return type' matches a specific query,
- # it will break if we expand the queries and they dont have different types
- if 'data' not in json:
- # /hosts/:id does not have a 'data' key
- data = json
- break
- elif isinstance(json['data'], MutableMapping):
- # /facts are returned as dict in 'data'
- data = json['data']
- break
- else:
- # /hosts 's 'results' is a list of all hosts, returned is paginated
- data = data + json['data']
- break
-
- self._cache[self.cache_key][url] = data
-
- return self._cache[self.cache_key][url]
-
- def _get_nodes(self):
- return self._get_json("%s/api2/json/nodes" % self.proxmox_url)
-
- def _get_pools(self):
- return self._get_json("%s/api2/json/pools" % self.proxmox_url)
-
- def _get_lxc_per_node(self, node):
- return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node))
-
- def _get_qemu_per_node(self, node):
- return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node))
-
- def _get_members_per_pool(self, pool):
- ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool))
- return ret['members']
-
- def _get_node_ip(self, node):
- ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node))
-
- for iface in ret:
- try:
- return iface['address']
- except Exception:
- return None
-
- def _get_agent_network_interfaces(self, node, vmid, vmtype):
- result = []
-
- try:
- ifaces = self._get_json(
- "%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % (
- self.proxmox_url, node, vmtype, vmid
- )
- )['result']
-
- if "error" in ifaces:
- if "class" in ifaces["error"]:
- # This happens on Windows, even though qemu agent is running, the IP address
- # cannot be fetched, as it's unsupported, also a command disabled can happen.
- errorClass = ifaces["error"]["class"]
- if errorClass in ["Unsupported"]:
- self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported")
- elif errorClass in ["CommandDisabled"]:
- self.display.v("Retrieving network interfaces from guest agents has been disabled")
- return result
-
- for iface in ifaces:
- result.append({
- 'name': iface['name'],
- 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '',
- 'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else []
- })
- except requests.HTTPError:
- pass
-
- return result
-
- def _get_vm_config(self, properties, node, vmid, vmtype, name):
- ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))
-
- properties[self._fact('node')] = node
- properties[self._fact('vmid')] = vmid
- properties[self._fact('vmtype')] = vmtype
-
- plaintext_configs = [
- 'description',
- ]
-
- for config in ret:
- key = self._fact(config)
- value = ret[config]
- try:
- # fixup disk images as they have no key
- if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
- value = ('disk_image=' + value)
-
- # Additional field containing parsed tags as list
- if config == 'tags':
- stripped_value = value.strip()
- if stripped_value:
- parsed_key = key + "_parsed"
- properties[parsed_key] = [tag.strip() for tag in stripped_value.split(",")]
-
- # The first field in the agent string tells you whether the agent is enabled
- # the rest of the comma separated string is extra config for the agent.
- # In some (newer versions of proxmox) instances it can be 'enabled=1'.
- if config == 'agent':
- agent_enabled = 0
- try:
- agent_enabled = int(value.split(',')[0])
- except ValueError:
- if value.split(',')[0] == "enabled=1":
- agent_enabled = 1
- if agent_enabled:
- agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype)
- if agent_iface_value:
- agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
- properties[agent_iface_key] = agent_iface_value
-
- if config == 'lxc':
- out_val = {}
- for k, v in value:
- if k.startswith('lxc.'):
- k = k[len('lxc.'):]
- out_val[k] = v
- value = out_val
-
- if config not in plaintext_configs and isinstance(value, string_types) \
- and all("=" in v for v in value.split(",")):
- # split off strings with commas to a dict
- # skip over any keys that cannot be processed
- try:
- value = dict(key.split("=", 1) for key in value.split(","))
- except Exception:
- continue
-
- properties[key] = value
- except NameError:
- return None
-
- def _get_vm_status(self, properties, node, vmid, vmtype, name):
- ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid))
- properties[self._fact('status')] = ret['status']
- if vmtype == 'qemu':
- properties[self._fact('qmpstatus')] = ret['qmpstatus']
-
- def _get_vm_snapshots(self, properties, node, vmid, vmtype, name):
- ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid))
- snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current']
- properties[self._fact('snapshots')] = snapshots
-
- def to_safe(self, word):
- '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
- #> ProxmoxInventory.to_safe("foo-bar baz")
- 'foo_barbaz'
- '''
- regex = r"[^A-Za-z0-9\_]"
- return re.sub(regex, "_", word.replace(" ", ""))
-
- def _fact(self, name):
- '''Generate a fact's full name from the common prefix and a name.'''
- return self.to_safe('%s%s' % (self.facts_prefix, name.lower()))
-
- def _group(self, name):
- '''Generate a group's full name from the common prefix and a name.'''
- return self.to_safe('%s%s' % (self.group_prefix, name.lower()))
-
- def _can_add_host(self, name, properties):
- '''Ensure that a host satisfies all defined hosts filters. If strict mode is
- enabled, any error during host filter compositing will lead to an AnsibleError
- being raised, otherwise the filter will be ignored.
- '''
- for host_filter in self.host_filters:
- try:
- if not self._compose(host_filter, properties):
- return False
- except Exception as e: # pylint: disable=broad-except
- message = "Could not evaluate host filter %s for host %s - %s" % (host_filter, name, to_native(e))
- if self.strict:
- raise AnsibleError(message)
- display.warning(message)
- return True
-
- def _add_host(self, name, variables):
- self.inventory.add_host(name)
- for k, v in variables.items():
- self.inventory.set_variable(name, k, v)
- variables = self.inventory.get_host(name).get_vars()
- self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict)
- self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict)
- self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict)
-
- def _handle_item(self, node, ittype, item):
- '''Handle an item from the list of LXC containers and Qemu VM. The
- return value will be either None if the item was skipped or the name of
- the item if it was added to the inventory.'''
- if item.get('template'):
- return None
-
- properties = dict()
- name, vmid = item['name'], item['vmid']
-
- # get status, config and snapshots if want_facts == True
- want_facts = self.get_option('want_facts')
- if want_facts:
- self._get_vm_status(properties, node, vmid, ittype, name)
- self._get_vm_config(properties, node, vmid, ittype, name)
- self._get_vm_snapshots(properties, node, vmid, ittype, name)
-
- # ensure the host satisfies filters
- if not self._can_add_host(name, properties):
- return None
-
- # add the host to the inventory
- self._add_host(name, properties)
- node_type_group = self._group('%s_%s' % (node, ittype))
- self.inventory.add_child(self._group('all_' + ittype), name)
- self.inventory.add_child(node_type_group, name)
-
- item_status = item['status']
- if item_status == 'running':
- if want_facts and ittype == 'qemu' and self.get_option('qemu_extended_statuses'):
- # get more details about the status of the qemu VM
- item_status = properties.get(self._fact('qmpstatus'), item_status)
- self.inventory.add_child(self._group('all_%s' % (item_status, )), name)
-
- return name
-
- def _populate_pool_groups(self, added_hosts):
- '''Generate groups from Proxmox resource pools, ignoring VMs and
- containers that were skipped.'''
- for pool in self._get_pools():
- poolid = pool.get('poolid')
- if not poolid:
- continue
- pool_group = self._group('pool_' + poolid)
- self.inventory.add_group(pool_group)
-
- for member in self._get_members_per_pool(poolid):
- name = member.get('name')
- if name and name in added_hosts:
- self.inventory.add_child(pool_group, name)
-
- def _populate(self):
-
- # create common groups
- default_groups = ['lxc', 'qemu', 'running', 'stopped']
-
- if self.get_option('qemu_extended_statuses'):
- default_groups.extend(['prelaunch', 'paused'])
-
- for group in default_groups:
- self.inventory.add_group(self._group('all_%s' % (group)))
-
- nodes_group = self._group('nodes')
- self.inventory.add_group(nodes_group)
-
- want_proxmox_nodes_ansible_host = self.get_option("want_proxmox_nodes_ansible_host")
- if want_proxmox_nodes_ansible_host is None:
- display.deprecated(
- 'The want_proxmox_nodes_ansible_host option of the community.general.proxmox inventory plugin'
- ' currently defaults to `true`, but this default has been deprecated and will change to `false`'
- ' in community.general 6.0.0. To keep the current behavior and remove this deprecation warning,'
- ' explicitly set `want_proxmox_nodes_ansible_host` to `true` in your inventory configuration',
- version='6.0.0', collection_name='community.general')
- want_proxmox_nodes_ansible_host = True
-
- # gather vm's on nodes
- self._get_auth()
- hosts = []
- for node in self._get_nodes():
- if not node.get('node'):
- continue
-
- self.inventory.add_host(node['node'])
- if node['type'] == 'node':
- self.inventory.add_child(nodes_group, node['node'])
-
- if node['status'] == 'offline':
- continue
-
- # get node IP address
- if want_proxmox_nodes_ansible_host:
- ip = self._get_node_ip(node['node'])
- self.inventory.set_variable(node['node'], 'ansible_host', ip)
-
- # add LXC/Qemu groups for the node
- for ittype in ('lxc', 'qemu'):
- node_type_group = self._group('%s_%s' % (node['node'], ittype))
- self.inventory.add_group(node_type_group)
-
- # get LXC containers and Qemu VMs for this node
- lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node']))
- qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node']))
- for ittype, item in itertools.chain(lxc_objects, qemu_objects):
- name = self._handle_item(node['node'], ittype, item)
- if name is not None:
- hosts.append(name)
-
- # gather vm's in pools
- self._populate_pool_groups(hosts)
-
- def parse(self, inventory, loader, path, cache=True):
- if not HAS_REQUESTS:
- raise AnsibleError('This module requires Python Requests 1.1.0 or higher: '
- 'https://github.com/psf/requests.')
-
- super(InventoryModule, self).parse(inventory, loader, path)
-
- # read config from file, this sets 'options'
- self._read_config_data(path)
-
- t = Templar(loader=loader)
-
- # read options
- proxmox_url = self.get_option('url')
- if t.is_template(proxmox_url):
- proxmox_url = t.template(variable=proxmox_url, disable_lookups=False)
- self.proxmox_url = proxmox_url.rstrip('/')
-
- proxmox_user = self.get_option('user')
- if t.is_template(proxmox_user):
- proxmox_user = t.template(variable=proxmox_user, disable_lookups=False)
- self.proxmox_user = proxmox_user
-
- proxmox_password = self.get_option('password')
- if t.is_template(proxmox_password):
- proxmox_password = t.template(variable=proxmox_password, disable_lookups=False)
- self.proxmox_password = proxmox_password
-
- proxmox_token_id = self.get_option('token_id')
- if t.is_template(proxmox_token_id):
- proxmox_token_id = t.template(variable=proxmox_token_id, disable_lookups=False)
- self.proxmox_token_id = proxmox_token_id
-
- proxmox_token_secret = self.get_option('token_secret')
- if t.is_template(proxmox_token_secret):
- proxmox_token_secret = t.template(variable=proxmox_token_secret, disable_lookups=False)
- self.proxmox_token_secret = proxmox_token_secret
-
- if proxmox_password is None and (proxmox_token_id is None or proxmox_token_secret is None):
- raise AnsibleError('You must specify either a password or both token_id and token_secret.')
-
- if self.get_option('qemu_extended_statuses') and not self.get_option('want_facts'):
- raise AnsibleError('You must set want_facts to True if you want to use qemu_extended_statuses.')
-
- self.cache_key = self.get_cache_key(path)
- self.use_cache = cache and self.get_option('cache')
- self.host_filters = self.get_option('filters')
- self.group_prefix = self.get_option('group_prefix')
- self.facts_prefix = self.get_option('facts_prefix')
- self.strict = self.get_option('strict')
-
- # actually populate inventory
- self._populate()
diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py
index 4404038270..59c19b498b 100644
--- a/plugins/inventory/scaleway.py
+++ b/plugins/inventory/scaleway.py
@@ -1,80 +1,85 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
-__metaclass__ = type
-DOCUMENTATION = r'''
- name: scaleway
- author:
- - Remy Leone (@remyleone)
- short_description: Scaleway inventory source
+DOCUMENTATION = r"""
+name: scaleway
+author:
+ - Remy Leone (@remyleone)
+short_description: Scaleway inventory source
+description:
+ - Get inventory hosts from Scaleway.
+requirements:
+ - PyYAML
+options:
+ plugin:
+ description: Token that ensures this is a source file for the 'scaleway' plugin.
+ required: true
+ type: string
+ choices: ['scaleway', 'community.general.scaleway']
+ regions:
+ description: Filter results on a specific Scaleway region.
+ type: list
+ elements: string
+ default:
+ - ams1
+ - ams2
+ - ams3
+ - par1
+ - par2
+ - par3
+ - waw1
+ - waw2
+ - waw3
+ tags:
+ description: Filter results on a specific tag.
+ type: list
+ elements: string
+ scw_profile:
description:
- - Get inventory hosts from Scaleway.
- requirements:
- - PyYAML
- options:
- plugin:
- description: Token that ensures this is a source file for the 'scaleway' plugin.
- required: True
- choices: ['scaleway', 'community.general.scaleway']
- regions:
- description: Filter results on a specific Scaleway region.
- type: list
- elements: string
- default:
- - ams1
- - par1
- - par2
- - waw1
- tags:
- description: Filter results on a specific tag.
- type: list
- elements: string
- scw_profile:
- description:
- - The config profile to use in config file.
- - By default uses the one specified as C(active_profile) in the config file, or falls back to C(default) if that is not defined.
- type: string
- version_added: 4.4.0
- oauth_token:
- description:
- - Scaleway OAuth token.
- - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file
- (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
- - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
- env:
- # in order of precedence
- - name: SCW_TOKEN
- - name: SCW_API_KEY
- - name: SCW_OAUTH_TOKEN
- hostnames:
- description: List of preference about what to use as an hostname.
- type: list
- elements: string
- default:
- - public_ipv4
- choices:
- - public_ipv4
- - private_ipv4
- - public_ipv6
- - hostname
- - id
- variables:
- description: 'Set individual variables: keys are variable names and
- values are templates. Any value returned by the
- L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
- can be used.'
- type: dict
-'''
+ - The config profile to use in config file.
+ - By default uses the one specified as C(active_profile) in the config file, or falls back to V(default) if that is
+ not defined.
+ type: string
+ version_added: 4.4.0
+ oauth_token:
+ description:
+ - Scaleway OAuth token.
+ - If not explicitly defined or in environment variables, it tries to lookup in the C(scaleway-cli) configuration file
+ (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
+ - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
+ type: string
+ env:
+ # in order of precedence
+ - name: SCW_TOKEN
+ - name: SCW_API_KEY
+ - name: SCW_OAUTH_TOKEN
+ hostnames:
+ description: List of preference about what to use as an hostname.
+ type: list
+ elements: string
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - public_ipv6
+ - hostname
+ - id
+ variables:
+ description: 'Set individual variables: keys are variable names and values are templates. Any value returned by the L(Scaleway
+ API, https://developer.scaleway.com/#servers-server-get) can be used.'
+ type: dict
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# scaleway_inventory.yml file in YAML format
# Example command line: ansible-inventory --list -i scaleway_inventory.yml
+---
# use hostname as inventory_hostname
# use the private IP address to connect to the host
plugin: community.general.scaleway
@@ -89,6 +94,7 @@ variables:
ansible_host: private_ip
state: state
+---
# use hostname as inventory_hostname and public IP address to connect to the host
plugin: community.general.scaleway
hostnames:
@@ -98,6 +104,7 @@ regions:
variables:
ansible_host: public_ip.address
+---
# Using static strings as variables
plugin: community.general.scaleway
hostnames:
@@ -106,7 +113,7 @@ variables:
ansible_host: public_ip.address
ansible_connection: "'ssh'"
ansible_user: "'admin'"
-'''
+"""
import os
import json
@@ -121,11 +128,11 @@ else:
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
from ansible.module_utils.urls import open_url
-from ansible.module_utils.common.text.converters import to_native, to_text
-from ansible.module_utils.six import raise_from
+from ansible.module_utils.common.text.converters import to_text
-import ansible.module_utils.six.moves.urllib.parse as urllib_parse
+import urllib.parse as urllib_parse
def _fetch_information(token, url):
@@ -137,7 +144,7 @@ def _fetch_information(token, url):
headers={'X-Auth-Token': token,
'Content-type': 'application/json'})
except Exception as e:
- raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e)))
+ raise AnsibleError(f"Error while fetching {url}: {e}")
try:
raw_json = json.loads(to_text(response.read()))
except ValueError:
@@ -158,7 +165,7 @@ def _fetch_information(token, url):
def _build_server_url(api_endpoint):
- return "/".join([api_endpoint, "servers"])
+ return f"{api_endpoint}/servers"
def extract_public_ipv4(server_info):
@@ -279,7 +286,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
zone_info = SCALEWAY_LOCATION[zone]
url = _build_server_url(zone_info["api_endpoint"])
- raw_zone_hosts_infos = _fetch_information(url=url, token=token)
+ raw_zone_hosts_infos = make_unsafe(_fetch_information(url=url, token=token))
for host_infos in raw_zone_hosts_infos:
@@ -329,7 +336,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
def parse(self, inventory, loader, path, cache=True):
if YAML_IMPORT_ERROR:
- raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR)
+ raise AnsibleError('PyYAML is probably missing') from YAML_IMPORT_ERROR
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path=path)
@@ -341,4 +348,4 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
hostname_preference = self.get_option("hostnames")
for zone in self._get_zones(config_zones):
- self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference)
+ self.do_zone_inventory(zone=make_unsafe(zone), token=token, tags=tags, hostname_preferences=hostname_preference)
diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py
deleted file mode 100644
index 39f880e820..0000000000
--- a/plugins/inventory/stackpath_compute.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2020 Shay Rybak
-# Copyright (c) 2020 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: stackpath_compute
- short_description: StackPath Edge Computing inventory source
- version_added: 1.2.0
- author:
- - UNKNOWN (@shayrybak)
- extends_documentation_fragment:
- - inventory_cache
- - constructed
- description:
- - Get inventory hosts from StackPath Edge Computing.
- - Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml).
- options:
- plugin:
- description:
- - A token that ensures this is a source file for the plugin.
- required: true
- choices: ['community.general.stackpath_compute']
- client_id:
- description:
- - An OAuth client ID generated from the API Management section of the StackPath customer portal
- U(https://control.stackpath.net/api-management).
- required: true
- type: str
- client_secret:
- description:
- - An OAuth client secret generated from the API Management section of the StackPath customer portal
- U(https://control.stackpath.net/api-management).
- required: true
- type: str
- stack_slugs:
- description:
- - A list of Stack slugs to query instances in. If no entry then get instances in all stacks on the account.
- type: list
- elements: str
- use_internal_ip:
- description:
- - Whether or not to use internal IP addresses, If false, uses external IP addresses, internal otherwise.
- - If an instance doesn't have an external IP it will not be returned when this option is set to false.
- type: bool
-'''
-
-EXAMPLES = '''
-# Example using credentials to fetch all workload instances in a stack.
----
-plugin: community.general.stackpath_compute
-client_id: my_client_id
-client_secret: my_client_secret
-stack_slugs:
-- my_first_stack_slug
-- my_other_stack_slug
-use_internal_ip: false
-'''
-
-import traceback
-import json
-
-from ansible.errors import AnsibleError
-from ansible.module_utils.urls import open_url
-from ansible.plugins.inventory import (
- BaseInventoryPlugin,
- Constructable,
- Cacheable
-)
-from ansible.utils.display import Display
-
-
-display = Display()
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
-
- NAME = 'community.general.stackpath_compute'
-
- def __init__(self):
- super(InventoryModule, self).__init__()
-
- # credentials
- self.client_id = None
- self.client_secret = None
- self.stack_slug = None
- self.api_host = "https://gateway.stackpath.com"
- self.group_keys = [
- "stackSlug",
- "workloadId",
- "cityCode",
- "countryCode",
- "continent",
- "target",
- "name",
- "workloadSlug"
- ]
-
- def _validate_config(self, config):
- if config['plugin'] != 'community.general.stackpath_compute':
- raise AnsibleError("plugin doesn't match this plugin")
- try:
- client_id = config['client_id']
- if len(client_id) != 32:
- raise AnsibleError("client_id must be 32 characters long")
- except KeyError:
- raise AnsibleError("config missing client_id, a required option")
- try:
- client_secret = config['client_secret']
- if len(client_secret) != 64:
- raise AnsibleError("client_secret must be 64 characters long")
- except KeyError:
- raise AnsibleError("config missing client_id, a required option")
- return True
-
- def _set_credentials(self):
- '''
- :param config_data: contents of the inventory config file
- '''
- self.client_id = self.get_option('client_id')
- self.client_secret = self.get_option('client_secret')
-
- def _authenticate(self):
- payload = json.dumps(
- {
- "client_id": self.client_id,
- "client_secret": self.client_secret,
- "grant_type": "client_credentials",
- }
- )
- headers = {
- "Content-Type": "application/json",
- }
- resp = open_url(
- self.api_host + '/identity/v1/oauth2/token',
- headers=headers,
- data=payload,
- method="POST"
- )
- status_code = resp.code
- if status_code == 200:
- body = resp.read()
- self.auth_token = json.loads(body)["access_token"]
-
- def _query(self):
- results = []
- workloads = []
- self._authenticate()
- for stack_slug in self.stack_slugs:
- try:
- workloads = self._stackpath_query_get_list(self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads')
- except Exception:
- raise AnsibleError("Failed to get workloads from the StackPath API: %s" % traceback.format_exc())
- for workload in workloads:
- try:
- workload_instances = self._stackpath_query_get_list(
- self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads/' + workload["id"] + '/instances'
- )
- except Exception:
- raise AnsibleError("Failed to get workload instances from the StackPath API: %s" % traceback.format_exc())
- for instance in workload_instances:
- if instance["phase"] == "RUNNING":
- instance["stackSlug"] = stack_slug
- instance["workloadId"] = workload["id"]
- instance["workloadSlug"] = workload["slug"]
- instance["cityCode"] = instance["location"]["cityCode"]
- instance["countryCode"] = instance["location"]["countryCode"]
- instance["continent"] = instance["location"]["continent"]
- instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"]
- try:
- if instance[self.hostname_key]:
- results.append(instance)
- except KeyError:
- pass
- return results
-
- def _populate(self, instances):
- for instance in instances:
- for group_key in self.group_keys:
- group = group_key + "_" + instance[group_key]
- group = group.lower().replace(" ", "_").replace("-", "_")
- self.inventory.add_group(group)
- self.inventory.add_host(instance[self.hostname_key],
- group=group)
-
- def _stackpath_query_get_list(self, url):
- self._authenticate()
- headers = {
- "Content-Type": "application/json",
- "Authorization": "Bearer " + self.auth_token,
- }
- next_page = True
- result = []
- cursor = '-1'
- while next_page:
- resp = open_url(
- url + '?page_request.first=10&page_request.after=%s' % cursor,
- headers=headers,
- method="GET"
- )
- status_code = resp.code
- if status_code == 200:
- body = resp.read()
- body_json = json.loads(body)
- result.extend(body_json["results"])
- next_page = body_json["pageInfo"]["hasNextPage"]
- if next_page:
- cursor = body_json["pageInfo"]["endCursor"]
- return result
-
- def _get_stack_slugs(self, stacks):
- self.stack_slugs = [stack["slug"] for stack in stacks]
-
- def verify_file(self, path):
- '''
- :param loader: an ansible.parsing.dataloader.DataLoader object
- :param path: the path to the inventory config file
- :return the contents of the config file
- '''
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')):
- return True
- display.debug(
- "stackpath_compute inventory filename must end with \
- 'stackpath_compute.yml' or 'stackpath_compute.yaml'"
- )
- return False
-
- def parse(self, inventory, loader, path, cache=True):
-
- super(InventoryModule, self).parse(inventory, loader, path)
-
- config = self._read_config_data(path)
- self._validate_config(config)
- self._set_credentials()
-
- # get user specifications
- self.use_internal_ip = self.get_option('use_internal_ip')
- if self.use_internal_ip:
- self.hostname_key = "ipAddress"
- else:
- self.hostname_key = "externalIpAddress"
-
- self.stack_slugs = self.get_option('stack_slugs')
- if not self.stack_slugs:
- try:
- stacks = self._stackpath_query_get_list(self.api_host + '/stack/v1/stacks')
- self._get_stack_slugs(stacks)
- except Exception:
- raise AnsibleError("Failed to get stack IDs from the Stackpath API: %s" % traceback.format_exc())
-
- cache_key = self.get_cache_key(path)
- # false when refresh_cache or --flush-cache is used
- if cache:
- # get the user-specified directive
- cache = self.get_option('cache')
-
- # Generate inventory
- cache_needs_update = False
- if cache:
- try:
- results = self._cache[cache_key]
- except KeyError:
- # if cache expires or cache file doesn't exist
- cache_needs_update = True
-
- if not cache or cache_needs_update:
- results = self._query()
-
- self._populate(results)
-
- # If the cache has expired/doesn't exist or
- # if refresh_inventory/flush cache is used
- # when the user is using caching, update the cached inventory
- try:
- if cache_needs_update or (not cache and self.get_option('cache')):
- self._cache[cache_key] = results
- except Exception:
- raise AnsibleError("Failed to populate data: %s" % traceback.format_exc())
diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py
index a8d186bb30..564db57dac 100644
--- a/plugins/inventory/virtualbox.py
+++ b/plugins/inventory/virtualbox.py
@@ -1,68 +1,88 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: virtualbox
- short_description: virtualbox inventory source
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: virtualbox
+short_description: Virtualbox inventory source
+description:
+ - Get inventory hosts from the local virtualbox installation.
+ - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
+ - The inventory_hostname is always the 'Name' of the virtualbox instance.
+ - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimeter.
+ - A separate parameter, O(enable_advanced_group_parsing) is exposed to change grouping behaviour. See the parameter documentation
+ for details.
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+options:
+ plugin:
+ description: Token that ensures this is a source file for the P(community.general.virtualbox#inventory) plugin.
+ type: string
+ required: true
+ choices: ['virtualbox', 'community.general.virtualbox']
+ running_only:
+ description: Toggles showing all VMs instead of only those currently running.
+ type: boolean
+ default: false
+ settings_password_file:
+ description: Provide a file containing the settings password (equivalent to C(--settingspwfile)).
+ type: string
+ network_info_path:
+ description: Property path to query for network information (C(ansible_host)).
+ type: string
+ default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
+ query:
+ description: Create vars from virtualbox properties.
+ type: dictionary
+ default: {}
+ enable_advanced_group_parsing:
description:
- - Get inventory hosts from the local virtualbox installation.
- - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
- - The inventory_hostname is always the 'Name' of the virtualbox instance.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- plugin:
- description: token that ensures this is a source file for the 'virtualbox' plugin
- required: True
- choices: ['virtualbox', 'community.general.virtualbox']
- running_only:
- description: toggles showing all vms vs only those currently running
- type: boolean
- default: False
- settings_password_file:
- description: provide a file containing the settings password (equivalent to --settingspwfile)
- network_info_path:
- description: property path to query for network information (ansible_host)
- default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
- query:
- description: create vars from virtualbox properties
- type: dictionary
- default: {}
-'''
+ - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based
+ on the V(/) character and assign the resulting list elements as an Ansible Group.
+ - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups
+ according to U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups). Groups are now split using the V(,)
+ character, and the V(/) character indicates nested groups.
+ - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3")
+ results in the group C(TestGroup2) being a child group of C(TestGroup); and the VM being a part of C(TestGroup2)
+ and C(TestGroup3).
+ default: false
+ type: bool
+ version_added: 9.2.0
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
+---
# file must be named vbox.yaml or vbox.yml
-simple_config_file:
- plugin: community.general.virtualbox
- settings_password_file: /etc/virtulbox/secrets
- query:
- logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
- compose:
- ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
+plugin: community.general.virtualbox
+settings_password_file: /etc/virtualbox/secrets
+query:
+ logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
+compose:
+ ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
+---
# add hosts (all match with minishift vm) to the group container if any of the vms are in ansible_inventory'
plugin: community.general.virtualbox
groups:
container: "'minis' in (inventory_hostname)"
-'''
+"""
import os
from subprocess import Popen, PIPE
from ansible.errors import AnsibleParserError
-from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
-from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+from collections.abc import MutableMapping
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.common.process import get_bin_path
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
+
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
''' Host inventory parser for ansible using local virtualbox. '''
@@ -116,6 +136,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict)
def _populate_from_cache(self, source_data):
+ source_data = make_unsafe(source_data)
hostvars = source_data.pop('_meta', {}).get('hostvars', {})
for group in source_data:
if group == 'all':
@@ -162,7 +183,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
v = v.strip()
# found host
if k.startswith('Name') and ',' not in v: # some setting strings appear in Name
- current_host = v
+ current_host = make_unsafe(v)
if current_host not in hostvars:
hostvars[current_host] = {}
self.inventory.add_host(current_host)
@@ -170,29 +191,29 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
# try to get network info
netdata = self._query_vbox_data(current_host, netinfo)
if netdata:
- self.inventory.set_variable(current_host, 'ansible_host', netdata)
+ self.inventory.set_variable(current_host, 'ansible_host', make_unsafe(netdata))
# found groups
elif k == 'Groups':
- for group in v.split('/'):
- if group:
- group = self.inventory.add_group(group)
- self.inventory.add_child(group, current_host)
- if group not in cacheable_results:
- cacheable_results[group] = {'hosts': []}
- cacheable_results[group]['hosts'].append(current_host)
+ if self.get_option('enable_advanced_group_parsing'):
+ self._handle_vboxmanage_group_string(v, current_host, cacheable_results)
+ else:
+ self._handle_group_string(v, current_host, cacheable_results)
continue
else:
# found vars, accumulate in hostvars for clean inventory set
- pref_k = 'vbox_' + k.strip().replace(' ', '_')
- if k.startswith(' '):
- if prevkey not in hostvars[current_host]:
+ pref_k = make_unsafe(f"vbox_{k.strip().replace(' ', '_')}")
+ leading_spaces = len(k) - len(k.lstrip(' '))
+ if 0 < leading_spaces <= 2:
+ if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], dict):
hostvars[current_host][prevkey] = {}
- hostvars[current_host][prevkey][pref_k] = v
+ hostvars[current_host][prevkey][pref_k] = make_unsafe(v)
+ elif leading_spaces > 2:
+ continue
else:
if v != '':
- hostvars[current_host][pref_k] = v
+ hostvars[current_host][pref_k] = make_unsafe(v)
if self._ungrouped_host(current_host, cacheable_results):
if 'ungrouped' not in cacheable_results:
cacheable_results['ungrouped'] = {'hosts': []}
@@ -220,6 +241,64 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
return all(find_host(host, inventory))
+ def _handle_group_string(self, vboxmanage_group, current_host, cacheable_results):
+ '''Handles parsing the VM's Group assignment from VBoxManage according to this inventory's initial implementation.'''
+ # The original implementation of this inventory plugin treated `/` as
+ # a delimeter to split and use as Ansible Groups.
+ for group in vboxmanage_group.split('/'):
+ if group:
+ group = make_unsafe(group)
+ group = self.inventory.add_group(group)
+ self.inventory.add_child(group, current_host)
+ if group not in cacheable_results:
+ cacheable_results[group] = {'hosts': []}
+ cacheable_results[group]['hosts'].append(current_host)
+
+ def _handle_vboxmanage_group_string(self, vboxmanage_group, current_host, cacheable_results):
+ '''Handles parsing the VM's Group assignment from VBoxManage according to VirtualBox documentation.'''
+ # Per the VirtualBox documentation, a VM can be part of many groups,
+ # and it is possible to have nested groups.
+ # Many groups are separated by commas ",", and nested groups use
+ # slash "/".
+ # https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups
+ # Multi groups: VBoxManage modifyvm "vm01" --groups "/TestGroup,/TestGroup2"
+ # Nested groups: VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2"
+
+ for group in vboxmanage_group.split(','):
+ if not group:
+ # We could get an empty element due how to split works, and
+ # possible assignments from VirtualBox. e.g. ,/Group1
+ continue
+
+ if group == "/":
+ # This is the "root" group. We get here if the VM was not
+ # assigned to a particular group. Consider the host to be
+ # unassigned to a group.
+ continue
+
+ parent_group = None
+ for subgroup in group.split('/'):
+ if not subgroup:
+ # Similarly to above, we could get an empty element.
+ # e.g //Group1
+ continue
+
+ if subgroup == '/':
+ # "root" group.
+ # Consider the host to be unassigned
+ continue
+
+ subgroup = make_unsafe(subgroup)
+ subgroup = self.inventory.add_group(subgroup)
+ if parent_group is not None:
+ self.inventory.add_child(parent_group, subgroup)
+ self.inventory.add_child(subgroup, current_host)
+ if subgroup not in cacheable_results:
+ cacheable_results[subgroup] = {'hosts': []}
+ cacheable_results[subgroup]['hosts'].append(current_host)
+
+ parent_group = subgroup
+
def verify_file(self, path):
valid = False
@@ -273,7 +352,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
try:
p = Popen(cmd, stdout=PIPE)
except Exception as e:
- raise AnsibleParserError(to_native(e))
+ raise AnsibleParserError(str(e))
source_data = p.stdout.read().splitlines()
diff --git a/plugins/inventory/xen_orchestra.py b/plugins/inventory/xen_orchestra.py
index 0b064b14db..fc0f0db757 100644
--- a/plugins/inventory/xen_orchestra.py
+++ b/plugins/inventory/xen_orchestra.py
@@ -1,67 +1,84 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- name: xen_orchestra
- short_description: Xen Orchestra inventory source
- version_added: 4.1.0
- author:
- - Dom Del Nano (@ddelnano)
- - Samori Gorse (@shinuza)
- requirements:
- - websocket-client >= 1.0.0
+DOCUMENTATION = r"""
+name: xen_orchestra
+short_description: Xen Orchestra inventory source
+version_added: 4.1.0
+author:
+ - Dom Del Nano (@ddelnano)
+ - Samori Gorse (@shinuza)
+requirements:
+ - websocket-client >= 1.0.0
+description:
+ - Get inventory hosts from a Xen Orchestra deployment.
+ - Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+options:
+ plugin:
+ description: The name of this plugin, it should always be set to V(community.general.xen_orchestra) for this plugin to
+ recognize it as its own.
+ required: true
+ choices: ['community.general.xen_orchestra']
+ type: str
+ api_host:
description:
- - Get inventory hosts from a Xen Orchestra deployment.
- - 'Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).'
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- plugin:
- description: The name of this plugin, it should always be set to C(community.general.xen_orchestra) for this plugin to recognize it as its own.
- required: yes
- choices: ['community.general.xen_orchestra']
- type: str
- api_host:
- description:
- - API host to XOA API.
- - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_HOST) will be used instead.
- type: str
- env:
- - name: ANSIBLE_XO_HOST
- user:
- description:
- - Xen Orchestra user.
- - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_USER) will be used instead.
- required: yes
- type: str
- env:
- - name: ANSIBLE_XO_USER
- password:
- description:
- - Xen Orchestra password.
- - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_PASSWORD) will be used instead.
- required: yes
- type: str
- env:
- - name: ANSIBLE_XO_PASSWORD
- validate_certs:
- description: Verify TLS certificate if using HTTPS.
- type: boolean
- default: true
- use_ssl:
- description: Use wss when connecting to the Xen Orchestra API
- type: boolean
- default: true
-'''
+ - API host to XOA API.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_HOST)
+ is used instead.
+ type: str
+ env:
+ - name: ANSIBLE_XO_HOST
+ user:
+ description:
+ - Xen Orchestra user.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_USER)
+ is used instead.
+ required: true
+ type: str
+ env:
+ - name: ANSIBLE_XO_USER
+ password:
+ description:
+ - Xen Orchestra password.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_PASSWORD)
+ is used instead.
+ required: true
+ type: str
+ env:
+ - name: ANSIBLE_XO_PASSWORD
+ validate_certs:
+ description: Verify TLS certificate if using HTTPS.
+ type: boolean
+ default: true
+ use_ssl:
+ description: Use wss when connecting to the Xen Orchestra API.
+ type: boolean
+ default: true
+ use_vm_uuid:
+ description:
+ - Import Xen VMs to inventory using their UUID as the VM entry name.
+ - If set to V(false) use VM name labels instead of UUIDs.
+ type: boolean
+ default: true
+ version_added: 10.4.0
+ use_host_uuid:
+ description:
+ - Import Xen Hosts to inventory using their UUID as the Host entry name.
+ - If set to V(false) use Host name labels instead of UUIDs.
+ type: boolean
+ default: true
+ version_added: 10.4.0
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
+---
# file must be named xen_orchestra.yaml or xen_orchestra.yml
plugin: community.general.xen_orchestra
api_host: 192.168.1.255
@@ -70,19 +87,22 @@ password: xo_pwd
validate_certs: true
use_ssl: true
groups:
- kube_nodes: "'kube_node' in tags"
+ kube_nodes: "'kube_node' in tags"
compose:
- ansible_port: 2222
-
-'''
+ ansible_port: 2222
+use_vm_uuid: false
+use_host_uuid: true
+"""
import json
import ssl
+from time import sleep
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
# 3rd party imports
try:
@@ -136,27 +156,45 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE}
self.conn = create_connection(
- '{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt)
+ f'{proto}://{xoa_api_host}/api/', sslopt=sslopt)
+
+ CALL_TIMEOUT = 100
+ """Number of 1/10ths of a second to wait before method call times out."""
+
+ def call(self, method, params):
+ """Calls a method on the XO server with the provided parameters."""
+ id = self.pointer
+ self.conn.send(json.dumps({
+ 'id': id,
+ 'jsonrpc': '2.0',
+ 'method': method,
+ 'params': params
+ }))
+
+ waited = 0
+ while waited < self.CALL_TIMEOUT:
+ response = json.loads(self.conn.recv())
+ if 'id' in response and response['id'] == id:
+ return response
+ else:
+ sleep(0.1)
+ waited += 1
+
+ raise AnsibleError(f'Method call {method} timed out after {self.CALL_TIMEOUT / 10} seconds.')
def login(self, user, password):
- payload = {'id': self.pointer, 'jsonrpc': '2.0', 'method': 'session.signIn', 'params': {
- 'username': user, 'password': password}}
- self.conn.send(json.dumps(payload))
- result = json.loads(self.conn.recv())
+ result = self.call('session.signIn', {
+ 'username': user, 'password': password
+ })
if 'error' in result:
- raise AnsibleError(
- 'Could not connect: {0}'.format(result['error']))
+ raise AnsibleError(f"Could not connect: {result['error']}")
def get_object(self, name):
- payload = {'id': self.pointer, 'jsonrpc': '2.0',
- 'method': 'xo.getAllObjects', 'params': {'filter': {'type': name}}}
- self.conn.send(json.dumps(payload))
- answer = json.loads(self.conn.recv())
+ answer = self.call('xo.getAllObjects', {'filter': {'type': name}})
if 'error' in answer:
- raise AnsibleError(
- 'Could not request: {0}'.format(answer['error']))
+ raise AnsibleError(f"Could not request: {answer['error']}")
return answer['result']
@@ -177,10 +215,20 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
def _add_vms(self, vms, hosts, pools):
+ vm_name_list = []
for uuid, vm in vms.items():
+ if self.vm_entry_name_type == 'name_label':
+ if vm['name_label'] not in vm_name_list:
+ entry_name = vm['name_label']
+ vm_name_list.append(vm['name_label'])
+ else:
+ vm_duplicate_count = vm_name_list.count(vm['name_label'])
+ entry_name = f"{vm['name_label']}_{vm_duplicate_count}"
+ vm_name_list.append(vm['name_label'])
+ else:
+ entry_name = uuid
group = 'with_ip'
ip = vm.get('mainIpAddress')
- entry_name = uuid
power_state = vm['power_state'].lower()
pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId'])
host_name = self._host_group_name_for_uuid(hosts, vm['$container'])
@@ -227,10 +275,20 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars())
def _add_hosts(self, hosts, pools):
+ host_name_list = []
for host in hosts.values():
- entry_name = host['uuid']
- group_name = 'xo_host_{0}'.format(
- clean_group_name(host['name_label']))
+ if self.host_entry_name_type == 'name_label':
+ if host['name_label'] not in host_name_list:
+ entry_name = host['name_label']
+ host_name_list.append(host['name_label'])
+ else:
+ host_duplicate_count = host_name_list.count(host['name_label'])
+ entry_name = f"{host['name_label']}_{host_duplicate_count}"
+ host_name_list.append(host['name_label'])
+ else:
+ entry_name = host['uuid']
+
+ group_name = f"xo_host_{clean_group_name(host['name_label'])}"
pool_name = self._pool_group_name_for_uuid(pools, host['$poolId'])
self.inventory.add_group(group_name)
@@ -253,15 +311,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
entry_name, 'product_brand', host['productBrand'])
for pool in pools.values():
- group_name = 'xo_pool_{0}'.format(
- clean_group_name(pool['name_label']))
+ group_name = f"xo_pool_{clean_group_name(pool['name_label'])}"
self.inventory.add_group(group_name)
def _add_pools(self, pools):
for pool in pools.values():
- group_name = 'xo_pool_{0}'.format(
- clean_group_name(pool['name_label']))
+ group_name = f"xo_pool_{clean_group_name(pool['name_label'])}"
self.inventory.add_group(group_name)
@@ -269,16 +325,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
def _pool_group_name_for_uuid(self, pools, pool_uuid):
for pool in pools:
if pool == pool_uuid:
- return 'xo_pool_{0}'.format(
- clean_group_name(pools[pool_uuid]['name_label']))
+ return f"xo_pool_{clean_group_name(pools[pool_uuid]['name_label'])}"
# TODO: Refactor
def _host_group_name_for_uuid(self, hosts, host_uuid):
for host in hosts:
if host == host_uuid:
- return 'xo_host_{0}'.format(
- clean_group_name(hosts[host_uuid]['name_label']
- ))
+ return f"xo_host_{clean_group_name(hosts[host_uuid]['name_label'])}"
def _populate(self, objects):
# Prepare general groups
@@ -324,5 +377,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
if not self.get_option('use_ssl'):
self.protocol = 'ws'
+ self.vm_entry_name_type = 'uuid'
+ if not self.get_option('use_vm_uuid'):
+ self.vm_entry_name_type = 'name_label'
+
+ self.host_entry_name_type = 'uuid'
+ if not self.get_option('use_host_uuid'):
+ self.host_entry_name_type = 'name_label'
+
objects = self._get_objects()
- self._populate(objects)
+ self._populate(make_unsafe(objects))
diff --git a/plugins/lookup/binary_file.py b/plugins/lookup/binary_file.py
new file mode 100644
index 0000000000..3236ade3e4
--- /dev/null
+++ b/plugins/lookup/binary_file.py
@@ -0,0 +1,113 @@
+#
+# Copyright (c) 2025, Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION = r"""
+name: binary_file
+author: Felix Fontein (@felixfontein)
+short_description: Read binary file and return it Base64 encoded
+version_added: 11.2.0
+description:
+ - This lookup returns the contents from a file on the Ansible controller's file system.
+ - The file is read as a binary file and its contents are returned Base64 encoded.
+ This is similar to using P(ansible.builtin.file#lookup) combined with P(ansible.builtin.b64encode#filter),
+ except that P(ansible.builtin.file#lookup) does not support binary files as it interprets the contents as UTF-8,
+ which can cause the wrong content being Base64 encoded.
+options:
+ _terms:
+ description:
+ - Paths of the files to read.
+ - Relative paths will be searched for in different places. See R(Ansible task paths, playbook_task_paths) for more details.
+ required: true
+ type: list
+ elements: str
+ not_exist:
+ description:
+ - Determine how to react if the specified file cannot be found.
+ type: str
+ choices:
+ error: Raise an error.
+ empty: Return an empty string for the file.
+ empty_str:
+ - Return the string C(empty) for the file.
+ - This cannot be confused with Base64 encoding due to the missing padding.
+ default: error
+notes:
+ - This lookup does not understand 'globbing' - use the P(ansible.builtin.fileglob#lookup) lookup instead.
+seealso:
+ - plugin: ansible.builtin.b64decode
+ plugin_type: filter
+ description: >-
+ The b64decode filter can be used to decode Base64 encoded data.
+ Note that Ansible cannot handle binary data, the data will be interpreted as UTF-8 text!
+ - plugin: ansible.builtin.file
+ plugin_type: lookup
+ description: You can use this lookup plugin to read text files from the Ansible controller.
+ - module: ansible.builtin.slurp
+ description: >-
+ Also allows to read binary files Base64 encoded, but from remote targets.
+ With C(delegate_to: localhost) can be redirected to run on the controller, but you have to know the path to the file to read.
+ Both this plugin and P(ansible.builtin.file#lookup) use search path logic that can, for example, also find files in the C(files)
+ directory of a role.
+ - ref: playbook_task_paths
+ description: Search paths used for relative files.
+"""
+
+EXAMPLES = r"""
+---
+- name: Output Base64 contents of binary files on screen
+ ansible.builtin.debug:
+ msg: "Content: {{ lookup('community.general.binary_file', item) }}"
+ loop:
+ - some-binary-file.bin
+"""
+
+RETURN = r"""
+_raw:
+ description:
+ - Base64 encoded content of requested files, or an empty string or the string C(empty), depending on the O(not_exist) option.
+ - This list contains one string per element of O(_terms) in the same order as O(_terms).
+ type: list
+ elements: str
+ returned: success
+"""
+
+import base64
+
+from ansible.errors import AnsibleLookupError
+from ansible.plugins.lookup import LookupBase
+
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+ not_exist = self.get_option("not_exist")
+
+ result = []
+ for term in terms:
+ display.debug(f"Searching for binary file: {term!r}")
+ path = self.find_file_in_search_path(variables, "files", term, ignore_missing=(not_exist != "error"))
+ display.vvvv(f"community.general.binary_file lookup using {path} as file")
+
+ if not path:
+ if not_exist == "empty":
+ result.append("")
+ continue
+ if not_exist == "empty_str":
+ result.append("empty")
+ continue
+ raise AnsibleLookupError(f"Could not locate file in community.general.binary_file lookup: {term}")
+
+ try:
+ with open(path, "rb") as f:
+ result.append(base64.b64encode(f.read()).decode("utf-8"))
+ except Exception as exc:
+ raise AnsibleLookupError(f"Error while reading {path}: {exc}")
+
+ return result
diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py
index 124c139c78..e4d958a96f 100644
--- a/plugins/lookup/bitwarden.py
+++ b/plugins/lookup/bitwarden.py
@@ -1,54 +1,126 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2022, Jonathan Lung
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = """
- name: bitwarden
- author:
- - Jonathan Lung (@lungj)
- requirements:
- - bw (command line utility)
- - be logged into bitwarden
- short_description: Retrieve secrets from Bitwarden
- version_added: 5.4.0
+DOCUMENTATION = r"""
+name: bitwarden
+author:
+ - Jonathan Lung (@lungj)
+requirements:
+ - bw (command line utility)
+ - be logged into bitwarden
+ - bitwarden vault unlocked
+ - E(BW_SESSION) environment variable set
+short_description: Retrieve secrets from Bitwarden
+version_added: 5.4.0
+description:
+ - Retrieve secrets from Bitwarden.
+options:
+ _terms:
+ description: Key(s) to fetch values for from login info.
+ required: true
+ type: list
+ elements: str
+ search:
description:
- - Retrieve secrets from Bitwarden.
- options:
- _terms:
- description: Key(s) to fetch values for from login info.
- required: true
- type: list
- elements: str
- field:
- description: Field to fetch; leave unset to fetch whole response.
- type: str
+ - Field to retrieve, for example V(name) or V(id).
+ - If set to V(id), only zero or one element can be returned. Use the Jinja C(first) filter to get the only list element.
+ - If set to V(None) or V(''), or if O(_terms) is empty, records are not filtered by fields.
+ type: str
+ default: name
+ version_added: 5.7.0
+ field:
+ description: Field to fetch. Leave unset to fetch whole response.
+ type: str
+ collection_id:
+ description:
+ - Collection ID to filter results by collection. Leave unset to skip filtering.
+ - O(collection_id) and O(collection_name) are mutually exclusive.
+ type: str
+ version_added: 6.3.0
+ collection_name:
+ description:
+ - Collection name to filter results by collection. Leave unset to skip filtering.
+ - O(collection_id) and O(collection_name) are mutually exclusive.
+ type: str
+ version_added: 10.4.0
+ organization_id:
+ description: Organization ID to filter results by organization. Leave unset to skip filtering.
+ type: str
+ version_added: 8.5.0
+ bw_session:
+ description: Pass session key instead of reading from env.
+ type: str
+ version_added: 8.4.0
+ result_count:
+ description:
+ - Number of results expected for the lookup query. Task fails if O(result_count) is set but does not match the number
+ of query results. Leave empty to skip this check.
+ type: int
+ version_added: 10.4.0
"""
-EXAMPLES = """
-- name: "Get 'password' from Bitwarden record named 'a_test'"
+EXAMPLES = r"""
+- name: "Get 'password' from all Bitwarden records named 'a_test'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test', field='password') }}
-- name: "Get full Bitwarden record named 'a_test'"
+- name: "Get 'password' from Bitwarden record with ID 'bafba515-af11-47e6-abe3-af1200cd18b2'"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') | first }}
+
+- name: "Get 'password' from all Bitwarden records named 'a_test' from collection"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
+
+- name: "Get list of all full Bitwarden records named 'a_test'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test') }}
+
+- name: "Get custom field 'api_key' from all Bitwarden records named 'a_test'"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}
+
+- name: "Get 'password' from all Bitwarden records named 'a_test', using given session key"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'a_test', field='password', bw_session='bXZ9B5TXi6...') }}
+
+- name: "Get all Bitwarden records from collection"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', None, collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
+
+- name: "Get all Bitwarden records from collection, by collection name"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', None, collection_name='my_collections/test_collection') }}
+
+- name: "Get Bitwarden record named 'a_test', ensure there is exactly one match"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'a_test', result_count=1) }}
"""
-RETURN = """
- _raw:
- description: List of requested field or JSON object of list of matches.
- type: list
- elements: raw
+RETURN = r"""
+_raw:
+ description:
+ - A one-element list that contains a list of requested fields or JSON objects of matches.
+ - If you use C(query), you get a list of lists. If you use C(lookup) without C(wantlist=true), this always gets reduced
+ to a list of field values or JSON objects.
+ type: list
+ elements: list
"""
from subprocess import Popen, PIPE
-from ansible.errors import AnsibleError
+from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.parsing.ajson import AnsibleJSONDecoder
from ansible.plugins.lookup import LookupBase
@@ -62,58 +134,162 @@ class Bitwarden(object):
def __init__(self, path='bw'):
self._cli_path = path
+ self._session = None
@property
def cli_path(self):
return self._cli_path
@property
- def logged_in(self):
+ def session(self):
+ return self._session
+
+ @session.setter
+ def session(self, value):
+ self._session = value
+
+ @property
+ def unlocked(self):
out, err = self._run(['status'], stdin="")
decoded = AnsibleJSONDecoder().raw_decode(out)[0]
return decoded['status'] == 'unlocked'
def _run(self, args, stdin=None, expected_rc=0):
+ if self.session:
+ args += ['--session', self.session]
+
p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
out, err = p.communicate(to_bytes(stdin))
rc = p.wait()
if rc != expected_rc:
+ if len(args) > 2 and args[0] == 'get' and args[1] == 'item' and b'Not found.' in err:
+ return 'null', ''
raise BitwardenException(err)
return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
- def _get_matches(self, search_value, search_field="name"):
+ def _get_matches(self, search_value, search_field, collection_id=None, organization_id=None):
"""Return matching records whose search_field is equal to key.
"""
- out, err = self._run(['list', 'items', '--search', search_value])
+
+ # Prepare set of params for Bitwarden CLI
+ if search_field == 'id':
+ params = ['get', 'item', search_value]
+ else:
+ params = ['list', 'items']
+ if search_value:
+ params.extend(['--search', search_value])
+
+ if collection_id:
+ params.extend(['--collectionid', collection_id])
+ if organization_id:
+ params.extend(['--organizationid', organization_id])
+
+ out, err = self._run(params)
# This includes things that matched in different fields.
initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
- # Filter to only include results from the right field.
- return [item for item in initial_matches if item[search_field] == search_value]
+ if search_field == 'id':
+ if initial_matches is None:
+ initial_matches = []
+ else:
+ initial_matches = [initial_matches]
- def get_field(self, field, search_value, search_field="name"):
- """Return a list of the specified field for records whose search_field match search_value.
+ # Filter to only include results from the right field, if a search is requested by value or field
+ return [item for item in initial_matches
+ if not search_value or not search_field or item.get(search_field) == search_value]
+
+ def get_field(self, field, search_value, search_field="name", collection_id=None, organization_id=None):
+ """Return a list of the specified field for records whose search_field match search_value
+ and filtered by collection if collection has been provided.
If field is None, return the whole record for each match.
"""
- matches = self._get_matches(search_value)
+ matches = self._get_matches(search_value, search_field, collection_id, organization_id)
+ if not field:
+ return matches
+ field_matches = []
+ for match in matches:
+ # if there are no custom fields, then `match` has no key 'fields'
+ if 'fields' in match:
+ custom_field_found = False
+ for custom_field in match['fields']:
+ if field == custom_field['name']:
+ field_matches.append(custom_field['value'])
+ custom_field_found = True
+ break
+ if custom_field_found:
+ continue
+ if 'login' in match and field in match['login']:
+ field_matches.append(match['login'][field])
+ continue
+ if field in match:
+ field_matches.append(match[field])
+ continue
- if field:
- return [match['login'][field] for match in matches]
+ if matches and not field_matches:
+ raise AnsibleError(f"field {field} does not exist in {search_value}")
- return matches
+ return field_matches
+
+ def get_collection_ids(self, collection_name: str, organization_id=None) -> list[str]:
+ """Return matching IDs of collections whose name is equal to collection_name."""
+
+ # Prepare set of params for Bitwarden CLI
+ params = ['list', 'collections', '--search', collection_name]
+
+ if organization_id:
+ params.extend(['--organizationid', organization_id])
+
+ out, err = self._run(params)
+
+ # This includes things that matched in different fields.
+ initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
+
+ # Filter to only return the ID of a collections with exactly matching name
+ return [item['id'] for item in initial_matches
+ if str(item.get('name')).lower() == collection_name.lower()]
class LookupModule(LookupBase):
- def run(self, terms, variables=None, **kwargs):
+ def run(self, terms=None, variables=None, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
field = self.get_option('field')
- if not _bitwarden.logged_in:
- raise AnsibleError("Not logged into Bitwarden. Run 'bw login'.")
+ search_field = self.get_option('search')
+ collection_id = self.get_option('collection_id')
+ collection_name = self.get_option('collection_name')
+ organization_id = self.get_option('organization_id')
+ result_count = self.get_option('result_count')
+ _bitwarden.session = self.get_option('bw_session')
- return [_bitwarden.get_field(field, term) for term in terms]
+ if not _bitwarden.unlocked:
+ raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.")
+
+ if not terms:
+ terms = [None]
+
+ if collection_name and collection_id:
+ raise AnsibleOptionsError("'collection_name' and 'collection_id' are mutually exclusive!")
+ elif collection_name:
+ collection_ids = _bitwarden.get_collection_ids(collection_name, organization_id)
+ if not collection_ids:
+ raise BitwardenException("No matching collections found!")
+ else:
+ collection_ids = [collection_id]
+
+ results = [
+ _bitwarden.get_field(field, term, search_field, collection_id, organization_id)
+ for collection_id in collection_ids
+ for term in terms
+ ]
+
+ for result in results:
+ if result_count is not None and len(result) != result_count:
+ raise BitwardenException(
+ f"Number of results doesn't match result_count! ({len(result)} != {result_count})")
+
+ return results
_bitwarden = Bitwarden()
diff --git a/plugins/lookup/bitwarden_secrets_manager.py b/plugins/lookup/bitwarden_secrets_manager.py
new file mode 100644
index 0000000000..0227c16bae
--- /dev/null
+++ b/plugins/lookup/bitwarden_secrets_manager.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2023, jantari (https://github.com/jantari)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import annotations
+
+
+DOCUMENTATION = r"""
+name: bitwarden_secrets_manager
+author:
+ - jantari (@jantari)
+requirements:
+ - bws (command line utility)
+short_description: Retrieve secrets from Bitwarden Secrets Manager
+version_added: 7.2.0
+description:
+ - Retrieve secrets from Bitwarden Secrets Manager.
+options:
+ _terms:
+ description: Secret ID(s) to fetch values for.
+ required: true
+ type: list
+ elements: str
+ bws_access_token:
+ description: The BWS access token to use for this lookup.
+ env:
+ - name: BWS_ACCESS_TOKEN
+ required: true
+ type: str
+"""
+
+EXAMPLES = r"""
+- name: Get a secret relying on the BWS_ACCESS_TOKEN environment variable for authentication
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972") }}
+
+- name: Get a secret passing an explicit access token for authentication
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ lookup(
+ "community.general.bitwarden_secrets_manager",
+ "2bc23e48-4932-40de-a047-5524b7ddc972",
+ bws_access_token="9.4f570d14-4b54-42f5-bc07-60f4450b1db5.YmluYXJ5LXNvbWV0aGluZy0xMjMK:d2h5IGhlbGxvIHRoZXJlCg=="
+ )
+ }}
+
+- name: Get two different secrets each using a different access token for authentication
+ ansible.builtin.debug:
+ msg:
+ - '{{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972", bws_access_token=token1) }}'
+ - '{{ lookup("community.general.bitwarden_secrets_manager", "9d89af4c-eb5d-41f5-bb0f-4ae81215c768", bws_access_token=token2) }}'
+ vars:
+ token1: "9.4f570d14-4b54-42f5-bc07-60f4450b1db5.YmluYXJ5LXNvbWV0aGluZy0xMjMK:d2h5IGhlbGxvIHRoZXJlCg=="
+ token2: "1.69b72797-6ea9-4687-a11e-848e41a30ae6.YW5zaWJsZSBpcyBncmVhdD8K:YW5zaWJsZSBpcyBncmVhdAo="
+
+- name: Get just the value of a secret
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972").value }}
+"""
+
+RETURN = r"""
+_raw:
+ description: List containing one or more secrets.
+ type: list
+ elements: dict
+"""
+
+from subprocess import Popen, PIPE
+from time import sleep
+
+from ansible.errors import AnsibleLookupError
+from ansible.module_utils.common.text.converters import to_text
+from ansible.parsing.ajson import AnsibleJSONDecoder
+from ansible.plugins.lookup import LookupBase
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+class BitwardenSecretsManagerException(AnsibleLookupError):
+ pass
+
+
+class BitwardenSecretsManager(object):
+ def __init__(self, path='bws'):
+ self._cli_path = path
+ self._max_retries = 3
+ self._retry_delay = 1
+
+ @property
+ def cli_path(self):
+ return self._cli_path
+
+ def _run_with_retry(self, args, stdin=None, retries=0):
+ out, err, rc = self._run(args, stdin)
+
+ if rc != 0:
+ if retries >= self._max_retries:
+ raise BitwardenSecretsManagerException("Max retries exceeded. Unable to retrieve secret.")
+
+ if "Too many requests" in err:
+ delay = self._retry_delay * (2 ** retries)
+ sleep(delay)
+ return self._run_with_retry(args, stdin, retries + 1)
+ else:
+ raise BitwardenSecretsManagerException(f"Command failed with return code {rc}: {err}")
+
+ return out, err, rc
+
+ def _run(self, args, stdin=None):
+ p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(stdin)
+ rc = p.wait()
+ return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict'), rc
+
+ def get_bws_version(self):
+ """Get the version of the Bitwarden Secrets Manager CLI.
+ """
+ out, err, rc = self._run(['--version'])
+ if rc != 0:
+ raise BitwardenSecretsManagerException(to_text(err))
+ # strip the prefix and grab the last segment, the version number
+ return out.split()[-1]
+
+ def get_secret(self, secret_id, bws_access_token):
+ """Get and return the secret with the given secret_id.
+ """
+
+ # Prepare set of params for Bitwarden Secrets Manager CLI
+ # Color output was not always disabled correctly with the default 'auto' setting so explicitly disable it.
+ params = [
+ '--color', 'no',
+ '--access-token', bws_access_token
+ ]
+
+ # bws version 0.3.0 introduced a breaking change in the command line syntax:
+ # pre-0.3.0: verb noun
+ # 0.3.0 and later: noun verb
+ bws_version = self.get_bws_version()
+ if LooseVersion(bws_version) < LooseVersion('0.3.0'):
+ params.extend(['get', 'secret', secret_id])
+ else:
+ params.extend(['secret', 'get', secret_id])
+
+ out, err, rc = self._run_with_retry(params)
+ if rc != 0:
+ raise BitwardenSecretsManagerException(to_text(err))
+
+ return AnsibleJSONDecoder().raw_decode(out)[0]
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+ bws_access_token = self.get_option('bws_access_token')
+
+ return [_bitwarden_secrets_manager.get_secret(term, bws_access_token) for term in terms]
+
+
+_bitwarden_secrets_manager = BitwardenSecretsManager()
diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py
index 6d98c271ee..1e07326a17 100644
--- a/plugins/lookup/cartesian.py
+++ b/plugins/lookup/cartesian.py
@@ -1,27 +1,27 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2013, Bradley Young
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: cartesian
- short_description: returns the cartesian product of lists
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: cartesian
+short_description: Returns the cartesian product of lists
+description:
+ - Takes the input lists and returns a list that represents the product of the input lists.
+ - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
+ - You can see the exact syntax in the examples section.
+options:
+ _terms:
description:
- - Takes the input lists and returns a list that represents the product of the input lists.
- - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
- You can see the exact syntax in the examples section.
- options:
- _raw:
- description:
- - a set of lists
- required: True
-'''
+ - A set of lists.
+ type: list
+ elements: list
+ required: true
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Example of the change in the description
ansible.builtin.debug:
msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}"
@@ -32,15 +32,15 @@ EXAMPLES = """
with_community.general.cartesian:
- "{{list1}}"
- "{{list2}}"
- - [1,2,3,4,5,6]
+ - [1, 2, 3, 4, 5, 6]
"""
-RETURN = """
- _list:
- description:
- - list of lists composed of elements of the input lists
- type: list
- elements: list
+RETURN = r"""
+_list:
+ description:
+ - List of lists composed of elements of the input lists.
+ type: list
+ elements: list
"""
from itertools import product
@@ -64,11 +64,11 @@ class LookupModule(LookupBase):
"""
results = []
for x in terms:
- intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
- results.append(intermediate)
+ results.append(listify_lookup_plugin_terms(x, templar=self._templar))
return results
def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
terms = self._lookup_variables(terms)
diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py
index 04ef7ee41d..69a53d007e 100644
--- a/plugins/lookup/chef_databag.py
+++ b/plugins/lookup/chef_databag.py
@@ -1,45 +1,44 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2016, Josh Bradley
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: chef_databag
- short_description: fetches data from a Chef Databag
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: chef_databag
+short_description: Fetches data from a Chef Databag
+description:
+ - 'This is a lookup plugin to provide access to chef data bags using the pychef package. It interfaces with the chef server
+ API using the same methods to find a knife or chef-client config file to load parameters from, starting from either the
+ given base path or the current working directory. The lookup order mirrors the one from Chef, all folders in the base
+ path are walked back looking for the following configuration file in order: C(.chef/knife.rb), C(~/.chef/knife.rb), C(/etc/chef/client.rb).'
+requirements:
+ - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))"
+options:
+ name:
description:
- - "This is a lookup plugin to provide access to chef data bags using the pychef package.
- It interfaces with the chef server api using the same methods to find a knife or chef-client config file to load parameters from,
- starting from either the given base path or the current working directory.
- The lookup order mirrors the one from Chef, all folders in the base path are walked back looking for the following configuration
- file in order : .chef/knife.rb, ~/.chef/knife.rb, /etc/chef/client.rb"
- requirements:
- - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))"
- options:
- name:
- description:
- - Name of the databag
- required: True
- item:
- description:
- - Item to fetch
- required: True
-'''
-
-EXAMPLES = """
- - ansible.builtin.debug:
- msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}"
+ - Name of the databag.
+ type: string
+ required: true
+ item:
+ description:
+ - Item to fetch.
+ type: string
+ required: true
"""
-RETURN = """
- _raw:
- description:
- - The value from the databag.
- type: list
- elements: dict
+EXAMPLES = r"""
+- ansible.builtin.debug:
+ msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}"
+"""
+
+RETURN = r"""
+_raw:
+ description:
+ - The value from the databag.
+ type: list
+ elements: dict
"""
from ansible.errors import AnsibleError
@@ -79,11 +78,11 @@ class LookupModule(LookupBase):
setattr(self, arg, parsed)
except ValueError:
raise AnsibleError(
- "can't parse arg {0}={1} as string".format(arg, arg_raw)
+ f"can't parse arg {arg}={arg_raw} as string"
)
if args:
raise AnsibleError(
- "unrecognized arguments to with_sequence: %r" % list(args.keys())
+ f"unrecognized arguments to with_sequence: {list(args.keys())!r}"
)
def run(self, terms, variables=None, **kwargs):
diff --git a/plugins/lookup/collection_version.py b/plugins/lookup/collection_version.py
index 4d25585b81..7a9eaf10bd 100644
--- a/plugins/lookup/collection_version.py
+++ b/plugins/lookup/collection_version.py
@@ -2,72 +2,67 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = """
+DOCUMENTATION = r"""
name: collection_version
author: Felix Fontein (@felixfontein)
version_added: "4.0.0"
short_description: Retrieves the version of an installed collection
description:
- - This lookup allows to query the version of an installed collection, and to determine whether a
- collection is installed at all.
- - By default it returns C(none) for non-existing collections and C(*) for collections without a
- version number. The latter should only happen in development environments, or when installing
- a collection from git which has no version in its C(galaxy.yml). This behavior can be adjusted
- by providing other values with I(result_not_found) and I(result_no_version).
+ - This lookup allows to query the version of an installed collection, and to determine whether a collection is installed
+ at all.
+ - By default it returns V(none) for non-existing collections and V(*) for collections without a version number. The latter
+ should only happen in development environments, or when installing a collection from git which has no version in its C(galaxy.yml).
+ This behavior can be adjusted by providing other values with O(result_not_found) and O(result_no_version).
options:
_terms:
description:
- The collections to look for.
- - For example C(community.general).
+ - For example V(community.general).
type: list
elements: str
required: true
result_not_found:
description:
- The value to return when the collection could not be found.
- - By default, C(none) is returned.
+ - By default, V(none) is returned.
type: string
default: ~
result_no_version:
description:
- The value to return when the collection has no version number.
- - This can happen for collections installed from git which do not have a version number
- in C(galaxy.yml).
- - By default, C(*) is returned.
+ - This can happen for collections installed from git which do not have a version number in C(galaxy.yml).
+ - By default, V(*) is returned.
type: string
default: '*'
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Check version of community.general
ansible.builtin.debug:
msg: "community.general version {{ lookup('community.general.collection_version', 'community.general') }}"
"""
-RETURN = """
- _raw:
- description:
- - The version number of the collections listed as input.
- - If a collection can not be found, it will return the value provided in I(result_not_found).
- By default, this is C(none).
- - If a collection can be found, but the version not identified, it will return the value provided in
- I(result_no_version). By default, this is C(*). This can happen for collections installed
- from git which do not have a version number in C(galaxy.yml).
- type: list
- elements: str
+RETURN = r"""
+_raw:
+ description:
+ - The version number of the collections listed as input.
+ - If a collection can not be found, it returns the value provided in O(result_not_found). By default, this is V(none).
+ - If a collection can be found, but the version not identified, it returns the value provided in O(result_no_version).
+ By default, this is V(*). This can happen for collections installed from git which do not have a version number in C(galaxy.yml).
+ type: list
+ elements: str
"""
import json
import os
import re
+from importlib import import_module
import yaml
from ansible.errors import AnsibleLookupError
-from ansible.module_utils.compat.importlib import import_module
from ansible.plugins.lookup import LookupBase
@@ -98,15 +93,10 @@ def load_collection_meta(collection_pkg, no_version='*'):
if os.path.exists(manifest_path):
return load_collection_meta_manifest(manifest_path)
- # Try to load galaxy.y(a)ml
+ # Try to load galaxy.yml
galaxy_path = os.path.join(path, 'galaxy.yml')
- galaxy_alt_path = os.path.join(path, 'galaxy.yaml')
- # galaxy.yaml was only supported in ansible-base 2.10 and ansible-core 2.11. Support was removed
- # in https://github.com/ansible/ansible/commit/595413d11346b6f26bb3d9df2d8e05f2747508a3 for
- # ansible-core 2.12.
- for path in (galaxy_path, galaxy_alt_path):
- if os.path.exists(path):
- return load_collection_meta_galaxy(path, no_version=no_version)
+ if os.path.exists(galaxy_path):
+ return load_collection_meta_galaxy(galaxy_path, no_version=no_version)
return {}
@@ -120,10 +110,10 @@ class LookupModule(LookupBase):
for term in terms:
if not FQCN_RE.match(term):
- raise AnsibleLookupError('"{term}" is not a FQCN'.format(term=term))
+ raise AnsibleLookupError(f'"{term}" is not a FQCN')
try:
- collection_pkg = import_module('ansible_collections.{fqcn}'.format(fqcn=term))
+ collection_pkg = import_module(f'ansible_collections.{term}')
except ImportError:
# Collection not found
result.append(not_found)
@@ -132,7 +122,7 @@ class LookupModule(LookupBase):
try:
data = load_collection_meta(collection_pkg, no_version=no_version)
except Exception as exc:
- raise AnsibleLookupError('Error while loading metadata for {fqcn}: {error}'.format(fqcn=term, error=exc))
+ raise AnsibleLookupError(f'Error while loading metadata for {term}: {exc}')
result.append(data.get('version', no_version))
diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py
index 794df197fc..c9cc3c6399 100644
--- a/plugins/lookup/consul_kv.py
+++ b/plugins/lookup/consul_kv.py
@@ -1,112 +1,117 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2015, Steve Gargan
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
+from __future__ import annotations
-__metaclass__ = type
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: consul_kv
- short_description: Fetch metadata from a Consul key value store.
- description:
- - Lookup metadata for a playbook from the key value store in a Consul cluster.
- Values can be easily set in the kv store with simple rest commands
- - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata)
- requirements:
- - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
- options:
- _raw:
- description: List of key(s) to retrieve.
- type: list
- elements: string
- recurse:
- type: boolean
- description: If true, will retrieve all the values that have the given key as prefix.
- default: False
- index:
- description:
- - If the key has a value with the specified index then this is returned allowing access to historical values.
- datacenter:
- description:
- - Retrieve the key from a consul datacenter other than the default for the consul host.
- token:
- description: The acl token to allow access to restricted values.
- host:
- default: localhost
- description:
- - The target to connect to, must be a resolvable address.
- Will be determined from C(ANSIBLE_CONSUL_URL) if that is set.
- - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)"
- env:
- - name: ANSIBLE_CONSUL_URL
- ini:
- - section: lookup_consul
- key: host
- port:
- description:
- - The port of the target host to connect to.
- - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
- default: 8500
- scheme:
- default: http
- description:
- - Whether to use http or https.
- - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
- validate_certs:
- default: True
- description: Whether to verify the ssl connection or not.
- env:
- - name: ANSIBLE_CONSUL_VALIDATE_CERTS
- ini:
- - section: lookup_consul
- key: validate_certs
- client_cert:
- description: The client cert to verify the ssl connection.
- env:
- - name: ANSIBLE_CONSUL_CLIENT_CERT
- ini:
- - section: lookup_consul
- key: client_cert
- url:
- description: "The target to connect to, should look like this: C(https://my.consul.server:8500)."
- type: str
- version_added: 1.0.0
- env:
- - name: ANSIBLE_CONSUL_URL
- ini:
- - section: lookup_consul
- key: url
-'''
-
-EXAMPLES = """
- - ansible.builtin.debug:
- msg: 'key contains {{item}}'
- with_community.general.consul_kv:
- - 'key/to/retrieve'
-
- - name: Parameters can be provided after the key be more specific about what to retrieve
- ansible.builtin.debug:
- msg: 'key contains {{item}}'
- with_community.general.consul_kv:
- - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
-
- - name: retrieving a KV from a remote cluster on non default port
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}"
-"""
-
-RETURN = """
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: consul_kv
+short_description: Fetch metadata from a Consul key value store
+description:
+ - Lookup metadata for a playbook from the key value store in a Consul cluster. Values can be easily set in the kv store
+ with simple rest commands.
+ - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata).
+requirements:
+ - 'py-consul python library U(https://github.com/criteo/py-consul?tab=readme-ov-file#installation)'
+options:
_raw:
+ description: List of key(s) to retrieve.
+ type: list
+ elements: string
+ recurse:
+ type: boolean
+ description: If V(true), retrieves all the values that have the given key as prefix.
+ default: false
+ index:
description:
- - Value(s) stored in consul.
- type: dict
+ - If the key has a value with the specified index then this is returned allowing access to historical values.
+ type: int
+ datacenter:
+ description:
+ - Retrieve the key from a consul datacenter other than the default for the consul host.
+ type: str
+ token:
+    description: The ACL token to allow access to restricted values.
+ type: str
+ host:
+ default: localhost
+ type: str
+ description:
+ - The target to connect to, must be a resolvable address.
+ - It is determined from E(ANSIBLE_CONSUL_URL) if that is set.
+ ini:
+ - section: lookup_consul
+ key: host
+ port:
+ description:
+ - The port of the target host to connect to.
+ - If you use E(ANSIBLE_CONSUL_URL) this value is used from there.
+ type: int
+ default: 8500
+ scheme:
+ default: http
+ type: str
+ description:
+ - Whether to use http or https.
+ - If you use E(ANSIBLE_CONSUL_URL) this value is used from there.
+ validate_certs:
+ default: true
+ description: Whether to verify the TLS connection or not.
+ type: bool
+ env:
+ - name: ANSIBLE_CONSUL_VALIDATE_CERTS
+ ini:
+ - section: lookup_consul
+ key: validate_certs
+ client_cert:
+ description: The client cert to verify the TLS connection.
+ type: str
+ env:
+ - name: ANSIBLE_CONSUL_CLIENT_CERT
+ ini:
+ - section: lookup_consul
+ key: client_cert
+ url:
+ description:
+ - The target to connect to.
+ - 'Should look like this: V(https://my.consul.server:8500).'
+ type: str
+ version_added: 1.0.0
+ env:
+ - name: ANSIBLE_CONSUL_URL
+ ini:
+ - section: lookup_consul
+ key: url
"""
-import os
-from ansible.module_utils.six.moves.urllib.parse import urlparse
+EXAMPLES = r"""
+- ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to/retrieve'
+
+- name: Parameters can be provided after the key to be more specific about what to retrieve
+ ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
+
+- name: Retrieving a KV from a remote cluster on a non-default port
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}"
+"""
+
+RETURN = r"""
+_raw:
+ description:
+ - Value(s) stored in consul.
+ type: dict
+"""
+
+from urllib.parse import urlparse
+
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.common.text.converters import to_text
@@ -125,7 +130,7 @@ class LookupModule(LookupBase):
if not HAS_CONSUL:
raise AnsibleError(
- 'python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation')
+ 'py-consul is required for consul_kv lookup. see https://github.com/criteo/py-consul?tab=readme-ov-file#installation')
# get options
self.set_options(direct=kwargs)
@@ -165,7 +170,7 @@ class LookupModule(LookupBase):
values.append(to_text(results[1]['Value']))
except Exception as e:
raise AnsibleError(
- "Error locating '%s' in kv store. Error was %s" % (term, e))
+ f"Error locating '{term}' in kv store. Error was {e}")
return values
@@ -186,7 +191,7 @@ class LookupModule(LookupBase):
if param and len(param) > 0:
name, value = param.split('=')
if name not in paramvals:
- raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name)
+ raise AnsibleAssertionError(f"{name} not a valid consul lookup parameter")
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py
index a783f8ba08..01e6a1a8fe 100644
--- a/plugins/lookup/credstash.py
+++ b/plugins/lookup/credstash.py
@@ -1,51 +1,57 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2015, Ensighten
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: credstash
- short_description: retrieve secrets from Credstash on AWS
- requirements:
- - credstash (python library)
- description:
- - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash"
- options:
- _terms:
- description: term or list of terms to lookup in the credit store
- type: list
- elements: string
- required: true
- table:
- description: name of the credstash table to query
- default: 'credential-store'
- version:
- description: Credstash version
- region:
- description: AWS region
- profile_name:
- description: AWS profile to use for authentication
- env:
- - name: AWS_PROFILE
- aws_access_key_id:
- description: AWS access key ID
- env:
- - name: AWS_ACCESS_KEY_ID
- aws_secret_access_key:
- description: AWS access key
- env:
- - name: AWS_SECRET_ACCESS_KEY
- aws_session_token:
- description: AWS session token
- env:
- - name: AWS_SESSION_TOKEN
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: credstash
+short_description: Retrieve secrets from Credstash on AWS
+requirements:
+ - credstash (python library)
+description:
+ - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash."
+options:
+ _terms:
+    description: Term or list of terms to look up in the credential store.
+ type: list
+ elements: string
+ required: true
+ table:
+ description: Name of the credstash table to query.
+ type: str
+ default: 'credential-store'
+ version:
+ description: Credstash version.
+ type: str
+ default: ''
+ region:
+ description: AWS region.
+ type: str
+ profile_name:
+ description: AWS profile to use for authentication.
+ type: str
+ env:
+ - name: AWS_PROFILE
+ aws_access_key_id:
+ description: AWS access key ID.
+ type: str
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ aws_secret_access_key:
+ description: AWS access key.
+ type: str
+ env:
+ - name: AWS_SECRET_ACCESS_KEY
+ aws_session_token:
+ description: AWS session token.
+ type: str
+ env:
+ - name: AWS_SESSION_TOKEN
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: first use credstash to store your secrets
ansible.builtin.shell: credstash put my-github-password secure123
@@ -69,24 +75,22 @@ EXAMPLES = """
environment: production
tasks:
- - name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}"
+ - name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}"
- - name: "Test credstash lookup plugin -- get the password with a context defined here"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
+ - name: "Test credstash lookup plugin -- get the password with a context defined here"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
"""
-RETURN = """
- _raw:
- description:
- - Value(s) stored in Credstash.
- type: str
+RETURN = r"""
+_raw:
+ description:
+ - Value(s) stored in Credstash.
+ type: str
"""
-import os
-
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
@@ -100,28 +104,39 @@ except ImportError:
class LookupModule(LookupBase):
- def run(self, terms, variables, **kwargs):
-
+ def run(self, terms, variables=None, **kwargs):
if not CREDSTASH_INSTALLED:
raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')
+ self.set_options(var_options=variables, direct=kwargs)
+
+ version = self.get_option('version')
+ region = self.get_option('region')
+ table = self.get_option('table')
+ profile_name = self.get_option('profile_name')
+ aws_access_key_id = self.get_option('aws_access_key_id')
+ aws_secret_access_key = self.get_option('aws_secret_access_key')
+ aws_session_token = self.get_option('aws_session_token')
+
+ context = {
+ k: v for k, v in kwargs.items()
+ if k not in ('version', 'region', 'table', 'profile_name', 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token')
+ }
+
+ kwargs_pass = {
+ 'profile_name': profile_name,
+ 'aws_access_key_id': aws_access_key_id,
+ 'aws_secret_access_key': aws_secret_access_key,
+ 'aws_session_token': aws_session_token,
+ }
+
ret = []
for term in terms:
try:
- version = kwargs.pop('version', '')
- region = kwargs.pop('region', None)
- table = kwargs.pop('table', 'credential-store')
- profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None))
- aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None))
- aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None))
- aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None))
- kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id,
- 'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token}
- val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass)
+ ret.append(credstash.getSecret(term, version, region, table, context=context, **kwargs_pass))
except credstash.ItemNotFound:
- raise AnsibleError('Key {0} not found'.format(term))
+ raise AnsibleError(f'Key {term} not found')
except Exception as e:
- raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
- ret.append(val)
+ raise AnsibleError(f'Encountered exception while fetching {term}: {e}')
return ret
diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py
index a0e36d3efe..955ba4a89a 100644
--- a/plugins/lookup/cyberarkpassword.py
+++ b/plugins/lookup/cyberarkpassword.py
@@ -1,63 +1,67 @@
-# -*- coding: utf-8 -*-
# Copyright (c) 2017, Edward Nunez
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: cyberarkpassword
- short_description: get secrets from CyberArk AIM
- requirements:
- - CyberArk AIM tool installed
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: cyberarkpassword
+short_description: Get secrets from CyberArk AIM
+requirements:
+ - CyberArk AIM tool installed
+description:
+ - Get secrets from CyberArk AIM.
+options:
+ _command:
+ description: Cyberark CLI utility.
+ type: string
+ env:
+ - name: AIM_CLIPASSWORDSDK_CMD
+ default: '/opt/CARKaim/sdk/clipasswordsdk'
+ appid:
+ description: Defines the unique ID of the application that is issuing the password request.
+ type: string
+ required: true
+ query:
+ description: Describes the filter criteria for the password retrieval.
+ type: string
+ required: true
+ output:
description:
- - Get secrets from CyberArk AIM.
- options :
- _command:
- description: Cyberark CLI utility.
- env:
- - name: AIM_CLIPASSWORDSDK_CMD
- default: '/opt/CARKaim/sdk/clipasswordsdk'
- appid:
- description: Defines the unique ID of the application that is issuing the password request.
- required: True
- query:
- description: Describes the filter criteria for the password retrieval.
- required: True
- output:
- description:
- - Specifies the desired output fields separated by commas.
- - "They could be: Password, PassProps.