diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml
index 2daffd359f..455d0659b4 100644
--- a/.azure-pipelines/azure-pipelines.yml
+++ b/.azure-pipelines/azure-pipelines.yml
@@ -29,14 +29,14 @@ schedules:
always: true
branches:
include:
+ - stable-11
- stable-10
- - stable-9
- cron: 0 11 * * 0
displayName: Weekly (old stable branches)
always: true
branches:
include:
- - stable-8
+ - stable-9
variables:
- name: checkoutPath
@@ -51,7 +51,7 @@ variables:
resources:
containers:
- container: default
- image: quay.io/ansible/azure-pipelines-test-container:6.0.0
+ image: quay.io/ansible/azure-pipelines-test-container:7.0.0
pool: Standard
@@ -70,6 +70,19 @@ stages:
- test: 2
- test: 3
- test: 4
+ - stage: Sanity_2_19
+ displayName: Sanity 2.19
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Test {0}
+ testFormat: 2.19/sanity/{0}
+ targets:
+ - test: 1
+ - test: 2
+ - test: 3
+ - test: 4
- stage: Sanity_2_18
displayName: Sanity 2.18
dependsOn: []
@@ -96,19 +109,6 @@ stages:
- test: 2
- test: 3
- test: 4
- - stage: Sanity_2_16
- displayName: Sanity 2.16
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Test {0}
- testFormat: 2.16/sanity/{0}
- targets:
- - test: 1
- - test: 2
- - test: 3
- - test: 4
### Units
- stage: Units_devel
displayName: Units devel
@@ -119,12 +119,24 @@ stages:
nameFormat: Python {0}
testFormat: devel/units/{0}/1
targets:
- - test: 3.8
- test: 3.9
- test: '3.10'
- test: '3.11'
- test: '3.12'
- test: '3.13'
+ - test: '3.14'
+ - stage: Units_2_19
+ displayName: Units 2.19
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: 2.19/units/{0}/1
+ targets:
+ - test: 3.8
+ - test: "3.11"
+ - test: "3.13"
- stage: Units_2_18
displayName: Units 2.18
dependsOn: []
@@ -135,6 +147,7 @@ stages:
testFormat: 2.18/units/{0}/1
targets:
- test: 3.8
+ - test: "3.11"
- test: "3.13"
- stage: Units_2_17
displayName: Units 2.17
@@ -146,19 +159,8 @@ stages:
testFormat: 2.17/units/{0}/1
targets:
- test: 3.7
+ - test: "3.10"
- test: "3.12"
- - stage: Units_2_16
- displayName: Units 2.16
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.16/units/{0}/1
- targets:
- - test: 2.7
- - test: 3.6
- - test: "3.11"
## Remote
- stage: Remote_devel_extra_vms
@@ -169,10 +171,10 @@ stages:
parameters:
testFormat: devel/{0}
targets:
- - name: Alpine 3.21
- test: alpine/3.21
- # - name: Fedora 41
- # test: fedora/41
+ - name: Alpine 3.22
+ test: alpine/3.22
+ # - name: Fedora 42
+ # test: fedora/42
- name: Ubuntu 22.04
test: ubuntu/22.04
- name: Ubuntu 24.04
@@ -189,16 +191,36 @@ stages:
targets:
- name: macOS 15.3
test: macos/15.3
- - name: RHEL 9.5
- test: rhel/9.5
- - name: FreeBSD 14.2
- test: freebsd/14.2
+ - name: RHEL 10.0
+ test: rhel/10.0
+ - name: RHEL 9.6
+ test: rhel/9.6
+ - name: FreeBSD 14.3
+ test: freebsd/14.3
- name: FreeBSD 13.5
test: freebsd/13.5
groups:
- 1
- 2
- 3
+ - stage: Remote_2_19
+ displayName: Remote 2.19
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.19/{0}
+ targets:
+ - name: RHEL 9.5
+ test: rhel/9.5
+ - name: RHEL 10.0
+ test: rhel/10.0
+ - name: FreeBSD 14.2
+ test: freebsd/14.2
+ groups:
+ - 1
+ - 2
+ - 3
- stage: Remote_2_18
displayName: Remote 2.18
dependsOn: []
@@ -225,34 +247,10 @@ stages:
parameters:
testFormat: 2.17/{0}
targets:
- - name: FreeBSD 13.3
- test: freebsd/13.3
+ - name: FreeBSD 13.5
+ test: freebsd/13.5
- name: RHEL 9.3
test: rhel/9.3
- - name: FreeBSD 14.0
- test: freebsd/14.0
- groups:
- - 1
- - 2
- - 3
- - stage: Remote_2_16
- displayName: Remote 2.16
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: 2.16/{0}
- targets:
- - name: macOS 13.2
- test: macos/13.2
- - name: RHEL 9.2
- test: rhel/9.2
- - name: RHEL 8.8
- test: rhel/8.8
- - name: RHEL 7.9
- test: rhel/7.9
- # - name: FreeBSD 13.2
- # test: freebsd/13.2
groups:
- 1
- 2
@@ -267,10 +265,10 @@ stages:
parameters:
testFormat: devel/linux/{0}
targets:
- - name: Fedora 41
- test: fedora41
- - name: Alpine 3.21
- test: alpine321
+ - name: Fedora 42
+ test: fedora42
+ - name: Alpine 3.22
+ test: alpine322
- name: Ubuntu 22.04
test: ubuntu2204
- name: Ubuntu 24.04
@@ -279,6 +277,22 @@ stages:
- 1
- 2
- 3
+ - stage: Docker_2_19
+ displayName: Docker 2.19
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.19/linux/{0}
+ targets:
+ - name: Fedora 41
+ test: fedora41
+ - name: Alpine 3.21
+ test: alpine321
+ groups:
+ - 1
+ - 2
+ - 3
- stage: Docker_2_18
displayName: Docker 2.18
dependsOn: []
@@ -315,26 +329,6 @@ stages:
- 1
- 2
- 3
- - stage: Docker_2_16
- displayName: Docker 2.16
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: 2.16/linux/{0}
- targets:
- - name: Fedora 38
- test: fedora38
- - name: openSUSE 15
- test: opensuse15
- - name: Alpine 3
- test: alpine3
- - name: CentOS 7
- test: centos7
- groups:
- - 1
- - 2
- - 3
### Community Docker
- stage: Docker_community_devel
@@ -367,8 +361,19 @@ stages:
# nameFormat: Python {0}
# testFormat: devel/generic/{0}/1
# targets:
-# - test: '3.8'
-# - test: '3.11'
+# - test: '3.9'
+# - test: '3.12'
+# - test: '3.14'
+# - stage: Generic_2_19
+# displayName: Generic 2.19
+# dependsOn: []
+# jobs:
+# - template: templates/matrix.yml
+# parameters:
+# nameFormat: Python {0}
+# testFormat: 2.19/generic/{0}/1
+# targets:
+# - test: '3.9'
# - test: '3.13'
# - stage: Generic_2_18
# displayName: Generic 2.18
@@ -392,44 +397,32 @@ stages:
# targets:
# - test: '3.7'
# - test: '3.12'
-# - stage: Generic_2_16
-# displayName: Generic 2.16
-# dependsOn: []
-# jobs:
-# - template: templates/matrix.yml
-# parameters:
-# nameFormat: Python {0}
-# testFormat: 2.16/generic/{0}/1
-# targets:
-# - test: '2.7'
-# - test: '3.6'
-# - test: '3.11'
- stage: Summary
condition: succeededOrFailed()
dependsOn:
- Sanity_devel
+ - Sanity_2_19
- Sanity_2_18
- Sanity_2_17
- - Sanity_2_16
- Units_devel
+ - Units_2_19
- Units_2_18
- Units_2_17
- - Units_2_16
- Remote_devel_extra_vms
- Remote_devel
+ - Remote_2_19
- Remote_2_18
- Remote_2_17
- - Remote_2_16
- Docker_devel
+ - Docker_2_19
- Docker_2_18
- Docker_2_17
- - Docker_2_16
- Docker_community_devel
# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
# - Generic_devel
+# - Generic_2_19
# - Generic_2_18
# - Generic_2_17
-# - Generic_2_16
jobs:
- template: templates/coverage.yml
diff --git a/.azure-pipelines/templates/matrix.yml b/.azure-pipelines/templates/matrix.yml
index 4876375855..49f5d8595a 100644
--- a/.azure-pipelines/templates/matrix.yml
+++ b/.azure-pipelines/templates/matrix.yml
@@ -50,11 +50,11 @@ jobs:
parameters:
jobs:
- ${{ if eq(length(parameters.groups), 0) }}:
- - ${{ each target in parameters.targets }}:
- - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
- test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
- - ${{ if not(eq(length(parameters.groups), 0)) }}:
- - ${{ each group in parameters.groups }}:
- ${{ each target in parameters.targets }}:
- - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
- test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
+ - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
+ test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
+ - ${{ if not(eq(length(parameters.groups), 0)) }}:
+ - ${{ each group in parameters.groups }}:
+ - ${{ each target in parameters.targets }}:
+ - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
+ test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
diff --git a/.azure-pipelines/templates/test.yml b/.azure-pipelines/templates/test.yml
index 700cf629d7..b263379c06 100644
--- a/.azure-pipelines/templates/test.yml
+++ b/.azure-pipelines/templates/test.yml
@@ -14,37 +14,37 @@ parameters:
jobs:
- ${{ each job in parameters.jobs }}:
- - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
- displayName: ${{ job.name }}
- container: default
- workspace:
- clean: all
- steps:
- - checkout: self
- fetchDepth: $(fetchDepth)
- path: $(checkoutPath)
- - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
- displayName: Run Tests
- - bash: .azure-pipelines/scripts/process-results.sh
- condition: succeededOrFailed()
- displayName: Process Results
- - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
- condition: eq(variables.haveCoverageData, 'true')
- displayName: Aggregate Coverage Data
- - task: PublishTestResults@2
- condition: eq(variables.haveTestResults, 'true')
- inputs:
- testResultsFiles: "$(outputPath)/junit/*.xml"
- displayName: Publish Test Results
- - task: PublishPipelineArtifact@1
- condition: eq(variables.haveBotResults, 'true')
- displayName: Publish Bot Results
- inputs:
- targetPath: "$(outputPath)/bot/"
- artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
- - task: PublishPipelineArtifact@1
- condition: eq(variables.haveCoverageData, 'true')
- displayName: Publish Coverage Data
- inputs:
- targetPath: "$(Agent.TempDirectory)/coverage/"
- artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
+ - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
+ displayName: ${{ job.name }}
+ container: default
+ workspace:
+ clean: all
+ steps:
+ - checkout: self
+ fetchDepth: $(fetchDepth)
+ path: $(checkoutPath)
+ - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
+ displayName: Run Tests
+ - bash: .azure-pipelines/scripts/process-results.sh
+ condition: succeededOrFailed()
+ displayName: Process Results
+ - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Aggregate Coverage Data
+ - task: PublishTestResults@2
+ condition: eq(variables.haveTestResults, 'true')
+ inputs:
+ testResultsFiles: "$(outputPath)/junit/*.xml"
+ displayName: Publish Test Results
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveBotResults, 'true')
+ displayName: Publish Bot Results
+ inputs:
+ targetPath: "$(outputPath)/bot/"
+ artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Publish Coverage Data
+ inputs:
+ targetPath: "$(Agent.TempDirectory)/coverage/"
+ artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000000..cd4bdfee65
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# YAML reformatting
+d032de3b16eed11ea3a31cd3d96d78f7c46a2ee0
+e8f965fbf8154ea177c6622da149f2ae8533bd3c
+e938ca5f20651abc160ee6aba10014013d04dcc1
+eaa5e07b2866e05b6c7b5628ca92e9cb1142d008
diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml
index 501c1370d4..fac3fae8f8 100644
--- a/.github/BOTMETA.yml
+++ b/.github/BOTMETA.yml
@@ -77,6 +77,8 @@ files:
$callbacks/opentelemetry.py:
keywords: opentelemetry observability
maintainers: v1v
+ $callbacks/print_task.py:
+ maintainers: demonpig
$callbacks/say.py:
keywords: brew cask darwin homebrew macosx macports osx
labels: macos say
@@ -90,6 +92,8 @@ files:
maintainers: ryancurrah
$callbacks/syslog_json.py:
maintainers: imjoseangel
+ $callbacks/tasks_only.py:
+ maintainers: felixfontein
$callbacks/timestamp.py:
maintainers: kurokobo
$callbacks/unixy.py:
@@ -111,9 +115,6 @@ files:
$connections/lxd.py:
labels: lxd
maintainers: mattclay
- $connections/proxmox_pct_remote.py:
- labels: proxmox
- maintainers: mietzen
$connections/qubes.py:
maintainers: kushaldas
$connections/saltstack.py:
@@ -125,8 +126,6 @@ files:
maintainers: $team_ansible_core
$doc_fragments/:
labels: docs_fragments
- $doc_fragments/clc.py:
- maintainers: clc-runner russoz
$doc_fragments/django.py:
maintainers: russoz
$doc_fragments/hpe3par.py:
@@ -210,6 +209,8 @@ files:
maintainers: resmo
$filters/to_months.yml:
maintainers: resmo
+ $filters/to_prettytable.py:
+ maintainers: tgadiev
$filters/to_seconds.yml:
maintainers: resmo
$filters/to_time_unit.yml:
@@ -245,13 +246,9 @@ files:
keywords: opennebula dynamic inventory script
labels: cloud opennebula
maintainers: feldsam
- $inventories/proxmox.py:
- maintainers: $team_virt ilijamt krauthosting
$inventories/scaleway.py:
labels: cloud scaleway
maintainers: $team_scaleway
- $inventories/stackpath_compute.py:
- maintainers: shayrybak
$inventories/virtualbox.py: {}
$inventories/xen_orchestra.py:
maintainers: ddelnano shinuza
@@ -295,9 +292,6 @@ files:
$lookups/lastpass.py: {}
$lookups/lmdb_kv.py:
maintainers: jpmens
- $lookups/manifold.py:
- labels: manifold
- maintainers: galanoff
$lookups/merge_variables.py:
maintainers: rlenferink m-a-r-k-e alpex8
$lookups/onepass:
@@ -385,6 +379,8 @@ files:
$module_utils/pipx.py:
labels: pipx
maintainers: russoz
+ $module_utils/pkg_req.py:
+ maintainers: russoz
$module_utils/python_runner.py:
maintainers: russoz
$module_utils/puppet.py:
@@ -419,6 +415,8 @@ files:
$module_utils/wdc_redfish_utils.py:
labels: wdc_redfish_utils
maintainers: $team_wdc
+ $module_utils/xdg_mime.py:
+ maintainers: mhalano
$module_utils/xenserver.py:
labels: xenserver
maintainers: bvitnik
@@ -502,8 +500,6 @@ files:
maintainers: NickatEpic
$modules/cisco_webex.py:
maintainers: drew-russell
- $modules/clc_:
- maintainers: clc-runner
$modules/cloud_init_data_facts.py:
maintainers: resmo
$modules/cloudflare_dns.py:
@@ -662,8 +658,6 @@ files:
maintainers: marns93
$modules/hg.py:
maintainers: yeukhon
- $modules/hipchat.py:
- maintainers: pb8226 shirou
$modules/homebrew.py:
ignore: ryansb
keywords: brew cask darwin homebrew macosx macports osx
@@ -789,6 +783,8 @@ files:
maintainers: brettmilford unnecessary-username juanmcasanova
$modules/jenkins_build_info.py:
maintainers: juanmcasanova
+ $modules/jenkins_credential.py:
+ maintainers: YoussefKhalidAli
$modules/jenkins_job.py:
maintainers: sermilrod
$modules/jenkins_job_info.py:
@@ -905,6 +901,8 @@ files:
maintainers: nerzhul
$modules/lvg.py:
maintainers: abulimov
+ $modules/lvm_pv.py:
+ maintainers: klention
$modules/lvg_rename.py:
maintainers: lszomor
$modules/lvol.py:
@@ -1059,7 +1057,7 @@ files:
$modules/ovh_monthly_billing.py:
maintainers: fraff
$modules/pacemaker_cluster.py:
- maintainers: matbu
+ maintainers: matbu munchtoast
$modules/pacemaker_resource.py:
maintainers: munchtoast
$modules/packet_:
@@ -1137,36 +1135,6 @@ files:
maintainers: $team_bsd berenddeboer
$modules/pritunl_:
maintainers: Lowess
- $modules/profitbricks:
- maintainers: baldwinSPC
- $modules/proxmox:
- keywords: kvm libvirt proxmox qemu
- labels: proxmox virt
- maintainers: $team_virt UnderGreen krauthosting
- ignore: tleguern
- $modules/proxmox.py:
- ignore: skvidal
- maintainers: UnderGreen krauthosting
- $modules/proxmox_disk.py:
- maintainers: castorsky krauthosting
- $modules/proxmox_kvm.py:
- ignore: skvidal
- maintainers: helldorado krauthosting
- $modules/proxmox_backup.py:
- maintainers: IamLunchbox
- $modules/proxmox_backup_info.py:
- maintainers: raoufnezhad mmayabi
- $modules/proxmox_nic.py:
- maintainers: Kogelvis krauthosting
- $modules/proxmox_node_info.py:
- maintainers: jwbernin krauthosting
- $modules/proxmox_storage_contents_info.py:
- maintainers: l00ptr krauthosting
- $modules/proxmox_tasks_info:
- maintainers: paginabianca krauthosting
- $modules/proxmox_template.py:
- ignore: skvidal
- maintainers: UnderGreen krauthosting
$modules/pubnub_blocks.py:
maintainers: parfeon pubnub
$modules/pulp_repo.py:
@@ -1237,9 +1205,9 @@ files:
$modules/scaleway_compute_private_network.py:
maintainers: pastral
$modules/scaleway_container.py:
- maintainers: Lunik
+ maintainers: Lunik
$modules/scaleway_container_info.py:
- maintainers: Lunik
+ maintainers: Lunik
$modules/scaleway_container_namespace.py:
maintainers: Lunik
$modules/scaleway_container_namespace_info.py:
@@ -1447,6 +1415,8 @@ files:
maintainers: dinoocch the-maldridge
$modules/xcc_:
maintainers: panyy3 renxulei
+ $modules/xdg_mime.py:
+ maintainers: mhalano
$modules/xenserver_:
maintainers: bvitnik
$modules/xenserver_facts.py:
@@ -1479,6 +1449,8 @@ files:
maintainers: natefoo
$modules/znode.py:
maintainers: treyperry
+ $modules/zpool.py:
+ maintainers: tomhesse
$modules/zpool_facts:
keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
labels: solaris
@@ -1545,6 +1517,22 @@ files:
maintainers: russoz
docs/docsite/rst/guide_deps.rst:
maintainers: russoz
+ docs/docsite/rst/guide_iocage.rst:
+ maintainers: russoz felixfontein
+ docs/docsite/rst/guide_iocage_inventory.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_aliases.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_basics.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_dhcp.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_hooks.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_properties.rst:
+ maintainers: vbotka
+ docs/docsite/rst/guide_iocage_inventory_tags.rst:
+ maintainers: vbotka
docs/docsite/rst/guide_modulehelper.rst:
maintainers: russoz
docs/docsite/rst/guide_online.rst:
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index f64de2abe3..4b1c1bfb95 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -7,147 +7,147 @@ name: Bug report
description: Create a report to help us improve
body:
-- type: markdown
- attributes:
- value: |
- ⚠
- Verify first that your issue is not [already reported on GitHub][issue search].
- Also test if the latest release and devel branch are affected too.
- *Complete **all** sections as described, this form is processed automatically.*
+ - type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test if the latest release and devel branch are affected too.
+ *Complete **all** sections as described, this form is processed automatically.*
- [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
-- type: textarea
- attributes:
- label: Summary
- description: Explain the problem briefly below.
- placeholder: >-
- When I try to do X with the collection from the main branch on GitHub, Y
- breaks in a way Z under the env E. Here are all the details I know
- about this problem...
- validations:
- required: true
-
-- type: dropdown
- attributes:
- label: Issue Type
- # FIXME: Once GitHub allows defining the default choice, update this
- options:
- - Bug Report
- validations:
- required: true
-
-- type: textarea
- attributes:
- # For smaller collections we could use a multi-select and hardcode the list
- # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins
- # Select from list, filter as you type (`mysql` would only show the 3 mysql components)
- # OR freeform - doesn't seem to be supported in adaptivecards
- label: Component Name
- description: >-
- Write the short name of the module, plugin, task or feature below,
- *use your best guess if unsure*. Do not include `community.general.`!
- placeholder: dnf, apt, yum, pip, user etc.
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Ansible Version
- description: >-
- Paste verbatim output from `ansible --version` between
- tripple backticks.
- value: |
- ```console (paste below)
- $ ansible --version
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Community.general Version
- description: >-
- Paste verbatim output from "ansible-galaxy collection list community.general"
- between tripple backticks.
- value: |
- ```console (paste below)
- $ ansible-galaxy collection list community.general
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Configuration
- description: >-
- If this issue has an example piece of YAML that can help to reproduce this problem, please provide it.
- This can be a piece of YAML from, e.g., an automation, script, scene or configuration.
- Paste verbatim output from `ansible-config dump --only-changed` between quotes
- value: |
- ```console (paste below)
- $ ansible-config dump --only-changed
-
- ```
-
-
-- type: textarea
- attributes:
- label: OS / Environment
- description: >-
- Provide all relevant information below, e.g. target OS versions,
- network device firmware, etc.
- placeholder: RHEL 8, CentOS Stream etc.
- validations:
- required: false
-
-
-- type: textarea
- attributes:
- label: Steps to Reproduce
- description: |
- Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also passed any playbooks, configs and commands you used.
-
- **HINT:** You can paste https://gist.github.com links for larger files.
- value: |
-
- ```yaml (paste below)
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Expected Results
- description: >-
- Describe what you expected to happen when running the steps above.
- placeholder: >-
- I expected X to happen because I assumed Y.
- that it did not.
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Actual Results
- description: |
- Describe what actually happened. If possible run with extra verbosity (`-vvvv`).
-
- Paste verbatim command output between quotes.
- value: |
- ```console (paste below)
-
- ```
-- type: checkboxes
- attributes:
- label: Code of Conduct
- description: |
- Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
- options:
- - label: I agree to follow the Ansible Code of Conduct
+ - type: textarea
+ attributes:
+ label: Summary
+ description: Explain the problem briefly below.
+ placeholder: >-
+ When I try to do X with the collection from the main branch on GitHub, Y
+ breaks in a way Z under the env E. Here are all the details I know
+ about this problem...
+ validations:
required: true
+
+ - type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Bug Report
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ # For smaller collections we could use a multi-select and hardcode the list
+ # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins
+ # Select from list, filter as you type (`mysql` would only show the 3 mysql components)
+ # OR freeform - doesn't seem to be supported in adaptivecards
+ label: Component Name
+ description: >-
+ Write the short name of the module, plugin, task or feature below,
+ *use your best guess if unsure*. Do not include `community.general.`!
+ placeholder: dnf, apt, yum, pip, user etc.
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Ansible Version
+ description: >-
+ Paste verbatim output from `ansible --version` between
+ triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible --version
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Community.general Version
+ description: >-
+ Paste verbatim output from "ansible-galaxy collection list community.general"
+ between triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible-galaxy collection list community.general
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Configuration
+ description: >-
+ If this issue has an example piece of YAML that can help to reproduce this problem, please provide it.
+ This can be a piece of YAML from, e.g., an automation, script, scene or configuration.
+ Paste verbatim output from `ansible-config dump --only-changed` between quotes
+ value: |
+ ```console (paste below)
+ $ ansible-config dump --only-changed
+
+ ```
+
+
+ - type: textarea
+ attributes:
+ label: OS / Environment
+ description: >-
+ Provide all relevant information below, e.g. target OS versions,
+ network device firmware, etc.
+ placeholder: RHEL 8, CentOS Stream etc.
+ validations:
+ required: false
+
+
+ - type: textarea
+ attributes:
+ label: Steps to Reproduce
+ description: |
+ Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also pass any playbooks, configs and commands you used.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+
+ ```yaml (paste below)
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Expected Results
+ description: >-
+ Describe what you expected to happen when running the steps above.
+ placeholder: >-
+ I expected X to happen because I assumed Y,
+ but it did not.
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Actual Results
+ description: |
+ Describe what actually happened. If possible run with extra verbosity (`-vvvv`).
+
+ Paste verbatim command output between quotes.
+ value: |
+ ```console (paste below)
+
+ ```
+ - type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
...
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 0cc2db058c..476eed516e 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -6,26 +6,26 @@
# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
blank_issues_enabled: false # default: true
contact_links:
-- name: Security bug report
- url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: |
- Please learn how to report security vulnerabilities here.
+ - name: Security bug report
+ url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: |
+ Please learn how to report security vulnerabilities here.
- For all security related bugs, email security@ansible.com
- instead of using this issue tracker and you will receive
- a prompt response.
+ For all security related bugs, email security@ansible.com
+ instead of using this issue tracker and you will receive
+ a prompt response.
- For more information, see
- https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
-- name: Ansible Code of Conduct
- url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: Be nice to other members of the community.
-- name: Talks to the community
- url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
- about: Please ask and answer usage questions here
-- name: Working groups
- url: https://github.com/ansible/community/wiki
- about: Interested in improving a specific area? Become a part of a working group!
-- name: For Enterprise
- url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: Red Hat offers support for the Ansible Automation Platform
+ For more information, see
+ https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
+ - name: Ansible Code of Conduct
+ url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: Be nice to other members of the community.
+ - name: Talks to the community
+ url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
+ about: Please ask and answer usage questions here
+ - name: Working groups
+ url: https://github.com/ansible/community/wiki
+ about: Interested in improving a specific area? Become a part of a working group!
+ - name: For Enterprise
+ url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: Red Hat offers support for the Ansible Automation Platform
diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml
index 6ec49fcb37..2ad4bce44a 100644
--- a/.github/ISSUE_TEMPLATE/documentation_report.yml
+++ b/.github/ISSUE_TEMPLATE/documentation_report.yml
@@ -8,122 +8,122 @@ description: Ask us about docs
# NOTE: issue body is enabled to allow screenshots
body:
-- type: markdown
- attributes:
- value: |
- ⚠
- Verify first that your issue is not [already reported on GitHub][issue search].
- Also test if the latest release and devel branch are affected too.
- *Complete **all** sections as described, this form is processed automatically.*
+ - type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test if the latest release and devel branch are affected too.
+ *Complete **all** sections as described, this form is processed automatically.*
- [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
-- type: textarea
- attributes:
- label: Summary
- description: |
- Explain the problem briefly below, add suggestions to wording or structure.
+ - type: textarea
+ attributes:
+ label: Summary
+ description: |
+ Explain the problem briefly below, add suggestions to wording or structure.
- **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
- placeholder: >-
- I was reading the Collection documentation of version X and I'm having
- problems understanding Y. It would be very helpful if that got
- rephrased as Z.
- validations:
- required: true
-
-- type: dropdown
- attributes:
- label: Issue Type
- # FIXME: Once GitHub allows defining the default choice, update this
- options:
- - Documentation Report
- validations:
- required: true
-
-- type: input
- attributes:
- label: Component Name
- description: >-
- Write the short name of the file, module, plugin, task or feature below,
- *use your best guess if unsure*. Do not include `community.general.`!
- placeholder: mysql_user
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Ansible Version
- description: >-
- Paste verbatim output from `ansible --version` between
- tripple backticks.
- value: |
- ```console (paste below)
- $ ansible --version
-
- ```
- validations:
- required: false
-
-- type: textarea
- attributes:
- label: Community.general Version
- description: >-
- Paste verbatim output from "ansible-galaxy collection list community.general"
- between tripple backticks.
- value: |
- ```console (paste below)
- $ ansible-galaxy collection list community.general
-
- ```
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Configuration
- description: >-
- Paste verbatim output from `ansible-config dump --only-changed` between quotes.
- value: |
- ```console (paste below)
- $ ansible-config dump --only-changed
-
- ```
- validations:
- required: false
-
-- type: textarea
- attributes:
- label: OS / Environment
- description: >-
- Provide all relevant information below, e.g. OS version,
- browser, etc.
- placeholder: Fedora 33, Firefox etc.
- validations:
- required: false
-
-- type: textarea
- attributes:
- label: Additional Information
- description: |
- Describe how this improves the documentation, e.g. before/after situation or screenshots.
-
- **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.
-
- **HINT:** You can paste https://gist.github.com links for larger files.
- placeholder: >-
- When the improvement is applied, it makes it more straightforward
- to understand X.
- validations:
- required: false
-
-- type: checkboxes
- attributes:
- label: Code of Conduct
- description: |
- Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
- options:
- - label: I agree to follow the Ansible Code of Conduct
+ **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
+ placeholder: >-
+ I was reading the Collection documentation of version X and I'm having
+ problems understanding Y. It would be very helpful if that got
+ rephrased as Z.
+ validations:
required: true
+
+ - type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Documentation Report
+ validations:
+ required: true
+
+ - type: input
+ attributes:
+ label: Component Name
+ description: >-
+ Write the short name of the file, module, plugin, task or feature below,
+ *use your best guess if unsure*. Do not include `community.general.`!
+ placeholder: mysql_user
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Ansible Version
+ description: >-
+ Paste verbatim output from `ansible --version` between
+ triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible --version
+
+ ```
+ validations:
+ required: false
+
+ - type: textarea
+ attributes:
+ label: Community.general Version
+ description: >-
+ Paste verbatim output from "ansible-galaxy collection list community.general"
+ between triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible-galaxy collection list community.general
+
+ ```
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Configuration
+ description: >-
+ Paste verbatim output from `ansible-config dump --only-changed` between quotes.
+ value: |
+ ```console (paste below)
+ $ ansible-config dump --only-changed
+
+ ```
+ validations:
+ required: false
+
+ - type: textarea
+ attributes:
+ label: OS / Environment
+ description: >-
+ Provide all relevant information below, e.g. OS version,
+ browser, etc.
+ placeholder: Fedora 33, Firefox etc.
+ validations:
+ required: false
+
+ - type: textarea
+ attributes:
+ label: Additional Information
+ description: |
+ Describe how this improves the documentation, e.g. before/after situation or screenshots.
+
+ **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ placeholder: >-
+ When the improvement is applied, it makes it more straightforward
+ to understand X.
+ validations:
+ required: false
+
+ - type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
...
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
index f34564283c..dc62f94c5c 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -7,67 +7,67 @@ name: Feature request
description: Suggest an idea for this project
body:
-- type: markdown
- attributes:
- value: |
- ⚠
- Verify first that your issue is not [already reported on GitHub][issue search].
- Also test if the latest release and devel branch are affected too.
- *Complete **all** sections as described, this form is processed automatically.*
+ - type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test if the latest release and devel branch are affected too.
+ *Complete **all** sections as described, this form is processed automatically.*
- [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
-- type: textarea
- attributes:
- label: Summary
- description: Describe the new feature/improvement briefly below.
- placeholder: >-
- I am trying to do X with the collection from the main branch on GitHub and
- I think that implementing a feature Y would be very helpful for me and
- every other user of community.general because of Z.
- validations:
- required: true
-
-- type: dropdown
- attributes:
- label: Issue Type
- # FIXME: Once GitHub allows defining the default choice, update this
- options:
- - Feature Idea
- validations:
- required: true
-
-- type: input
- attributes:
- label: Component Name
- description: >-
- Write the short name of the module or plugin, or which other part(s) of the collection this feature affects.
- *use your best guess if unsure*. Do not include `community.general.`!
- placeholder: dnf, apt, yum, pip, user etc.
- validations:
- required: true
-
-- type: textarea
- attributes:
- label: Additional Information
- description: |
- Describe how the feature would be used, why it is needed and what it would solve.
-
- **HINT:** You can paste https://gist.github.com links for larger files.
- value: |
-
- ```yaml (paste below)
-
- ```
- validations:
- required: false
-- type: checkboxes
- attributes:
- label: Code of Conduct
- description: |
- Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
- options:
- - label: I agree to follow the Ansible Code of Conduct
+ - type: textarea
+ attributes:
+ label: Summary
+ description: Describe the new feature/improvement briefly below.
+ placeholder: >-
+ I am trying to do X with the collection from the main branch on GitHub and
+ I think that implementing a feature Y would be very helpful for me and
+ every other user of community.general because of Z.
+ validations:
required: true
+
+ - type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Feature Idea
+ validations:
+ required: true
+
+ - type: input
+ attributes:
+ label: Component Name
+ description: >-
+ Write the short name of the module or plugin, or which other part(s) of the collection this feature affects.
+ *use your best guess if unsure*. Do not include `community.general.`!
+ placeholder: dnf, apt, yum, pip, user etc.
+ validations:
+ required: true
+
+ - type: textarea
+ attributes:
+ label: Additional Information
+ description: |
+ Describe how the feature would be used, why it is needed and what it would solve.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+
+ ```yaml (paste below)
+
+ ```
+ validations:
+ required: false
+ - type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
...
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 2f4ff900d8..f71b322d2a 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -9,3 +9,7 @@ updates:
directory: "/"
schedule:
interval: "weekly"
+ groups:
+ ci:
+ patterns:
+ - "*"
diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml
index c5b26dba9a..89b36e6163 100644
--- a/.github/workflows/ansible-test.yml
+++ b/.github/workflows/ansible-test.yml
@@ -7,7 +7,7 @@
# https://github.com/marketplace/actions/ansible-test
name: EOL CI
-on:
+"on":
# Run EOL CI against all pushes (direct commits, also merged PRs), Pull Requests
push:
branches:
@@ -29,12 +29,7 @@ jobs:
strategy:
matrix:
ansible:
- - '2.15'
- # Ansible-test on various stable branches does not yet work well with cgroups v2.
- # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
- # image for these stable branches. The list of branches where this is necessary will
- # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
- # for the latest list.
+ - '2.16'
runs-on: ubuntu-latest
steps:
- name: Perform sanity testing
@@ -49,11 +44,6 @@ jobs:
git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools
units:
- # Ansible-test on various stable branches does not yet work well with cgroups v2.
- # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
- # image for these stable branches. The list of branches where this is necessary will
- # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
- # for the latest list.
runs-on: ubuntu-latest
name: EOL Units (Ⓐ${{ matrix.ansible }}+py${{ matrix.python }})
strategy:
@@ -67,12 +57,12 @@ jobs:
exclude:
- ansible: ''
include:
- - ansible: '2.15'
+ - ansible: '2.16'
python: '2.7'
- - ansible: '2.15'
- python: '3.5'
- - ansible: '2.15'
- python: '3.10'
+ - ansible: '2.16'
+ python: '3.6'
+ - ansible: '2.16'
+ python: '3.11'
steps:
- name: >-
@@ -92,11 +82,6 @@ jobs:
testing-type: units
integration:
- # Ansible-test on various stable branches does not yet work well with cgroups v2.
- # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
- # image for these stable branches. The list of branches where this is necessary will
- # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
- # for the latest list.
runs-on: ubuntu-latest
name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }})
strategy:
@@ -113,43 +98,56 @@ jobs:
exclude:
- ansible: ''
include:
- # 2.15
- - ansible: '2.15'
- docker: alpine3
+ # 2.16
+ # CentOS 7 does not work in GHA, that's why it's not listed here.
+ - ansible: '2.16'
+ docker: fedora38
python: ''
target: azp/posix/1/
- - ansible: '2.15'
- docker: alpine3
+ - ansible: '2.16'
+ docker: fedora38
python: ''
target: azp/posix/2/
- - ansible: '2.15'
- docker: alpine3
+ - ansible: '2.16'
+ docker: fedora38
python: ''
target: azp/posix/3/
- - ansible: '2.15'
- docker: fedora37
+ - ansible: '2.16'
+ docker: opensuse15
python: ''
target: azp/posix/1/
- - ansible: '2.15'
- docker: fedora37
+ - ansible: '2.16'
+ docker: opensuse15
python: ''
target: azp/posix/2/
- - ansible: '2.15'
- docker: fedora37
+ - ansible: '2.16'
+ docker: opensuse15
+ python: ''
+ target: azp/posix/3/
+ - ansible: '2.16'
+ docker: alpine3
+ python: ''
+ target: azp/posix/1/
+ - ansible: '2.16'
+ docker: alpine3
+ python: ''
+ target: azp/posix/2/
+ - ansible: '2.16'
+ docker: alpine3
python: ''
target: azp/posix/3/
# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
- # - ansible: '2.13'
+ # - ansible: '2.16'
# docker: default
- # python: '3.9'
+ # python: '2.7'
# target: azp/generic/1/
- # - ansible: '2.14'
+ # - ansible: '2.16'
# docker: default
- # python: '3.10'
+ # python: '3.6'
# target: azp/generic/1/
- # - ansible: '2.15'
+ # - ansible: '2.16'
# docker: default
- # python: '3.9'
+ # python: '3.11'
# target: azp/generic/1/
steps:
@@ -166,12 +164,13 @@ jobs:
integration-continue-on-error: 'false'
integration-diff: 'false'
integration-retry-on-error: 'true'
+ # TODO: remove "--branch stable-2" from community.crypto install once we're only using ansible-core 2.17 or newer!
pre-test-cmd: >-
mkdir -p ../../ansible
;
git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git ../../ansible/posix
;
- git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git ../../community/crypto
+ git clone --depth=1 --single-branch --branch stable-2 https://github.com/ansible-collections/community.crypto.git ../../community/crypto
;
git clone --depth=1 --single-branch https://github.com/ansible-collections/community.docker.git ../../community/docker
;
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index e8572fafb6..ec344315bb 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -5,7 +5,7 @@
name: "Code scanning - action"
-on:
+"on":
schedule:
- cron: '26 19 * * 1'
workflow_dispatch:
@@ -23,16 +23,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- persist-credentials: false
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ persist-credentials: false
- # Initializes the CodeQL tools for scanning.
- - name: Initialize CodeQL
- uses: github/codeql-action/init@v3
- with:
- languages: python
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v3
+ with:
+ languages: python
- - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v3
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v3
diff --git a/.gitignore b/.gitignore
index cf1f74e41c..5c6e9c86c6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -383,6 +383,16 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
+### Python Patch ###
+# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+poetry.toml
+
+# ruff
+.ruff_cache/
+
+# LSP config files
+pyrightconfig.json
+
### Vim ###
# Swap
[._]*.s[a-v][a-z]
@@ -482,6 +492,10 @@ tags
# https://plugins.jetbrains.com/plugin/12206-codestream
.idea/codestream.xml
+# Azure Toolkit for IntelliJ plugin
+# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij
+.idea/**/azureSettings.xml
+
### Windows ###
# Windows thumbnail cache files
Thumbs.db
diff --git a/.yamllint b/.yamllint
new file mode 100644
index 0000000000..c10d86ab19
--- /dev/null
+++ b/.yamllint
@@ -0,0 +1,52 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+extends: default
+
+ignore: |
+ /changelogs/
+
+rules:
+ line-length:
+ max: 1000
+ level: error
+ document-start: disable
+ document-end: disable
+ truthy:
+ level: error
+ allowed-values:
+ - 'true'
+ - 'false'
+ indentation:
+ spaces: 2
+ indent-sequences: true
+ key-duplicates: enable
+ trailing-spaces: enable
+ new-line-at-end-of-file: disable
+ hyphens:
+ max-spaces-after: 1
+ empty-lines:
+ max: 2
+ max-start: 0
+ max-end: 0
+ commas:
+ max-spaces-before: 0
+ min-spaces-after: 1
+ max-spaces-after: 1
+ colons:
+ max-spaces-before: 0
+ max-spaces-after: 1
+ brackets:
+ min-spaces-inside: 0
+ max-spaces-inside: 0
+ braces:
+ min-spaces-inside: 0
+ max-spaces-inside: 1
+ octal-values:
+ forbid-implicit-octal: true
+ forbid-explicit-octal: true
+ comments:
+ min-spaces-from-content: 1
+ comments-indentation: false
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6bd354a13a..b35c52441b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,1078 +1,5 @@
-# Community General Release Notes
+# Placeholder changelog
-**Topics**
-
-- v10\.6\.0
- - Release Summary
- - Minor Changes
- - Deprecated Features
- - Bugfixes
- - Known Issues
- - New Plugins
- - Connection
-- v10\.5\.0
- - Release Summary
- - Minor Changes
- - Bugfixes
- - New Modules
-- v10\.4\.0
- - Release Summary
- - Minor Changes
- - Deprecated Features
- - Bugfixes
- - New Modules
-- v10\.3\.1
- - Release Summary
- - Minor Changes
- - Bugfixes
-- v10\.3\.0
- - Release Summary
- - Minor Changes
- - Deprecated Features
- - Security Fixes
- - Bugfixes
- - New Plugins
- - Connection
- - Filter
- - Lookup
- - New Modules
-- v10\.2\.0
- - Release Summary
- - Minor Changes
- - Deprecated Features
- - Security Fixes
- - Bugfixes
- - New Plugins
- - Inventory
- - New Modules
-- v10\.1\.0
- - Release Summary
- - Minor Changes
- - Deprecated Features
- - Bugfixes
- - New Plugins
- - Filter
- - New Modules
-- v10\.0\.1
- - Release Summary
- - Bugfixes
-- v10\.0\.0
- - Release Summary
- - Minor Changes
- - Breaking Changes / Porting Guide
- - Deprecated Features
- - Removed Features \(previously deprecated\)
- - Bugfixes
- - Known Issues
- - New Plugins
- - Filter
- - Test
- - New Modules
-This changelog describes changes after version 9\.0\.0\.
-
-
-## v10\.6\.0
-
-
-### Release Summary
-
-Regular bugfix and feature release\.
-
-
-### Minor Changes
-
-* apache2\_module \- added workaround for new PHP module name\, from php7\_module
to php\_module
\([https\://github\.com/ansible\-collections/community\.general/pull/9951](https\://github\.com/ansible\-collections/community\.general/pull/9951)\)\.
-* gitlab\_project \- add option build\_timeout
\([https\://github\.com/ansible\-collections/community\.general/pull/9960](https\://github\.com/ansible\-collections/community\.general/pull/9960)\)\.
-* gitlab\_project\_members \- extend choices parameter access\_level
by missing upstream valid value owner
\([https\://github\.com/ansible\-collections/community\.general/pull/9953](https\://github\.com/ansible\-collections/community\.general/pull/9953)\)\.
-* hpilo\_boot \- add option to get an idempotent behavior while powering on server\, resulting in success instead of failure when using state\: boot\_once
option \([https\://github\.com/ansible\-collections/community\.general/pull/9646](https\://github\.com/ansible\-collections/community\.general/pull/9646)\)\.
-* idrac\_redfish\_command\, idrac\_redfish\_config\, idrac\_redfish\_info \- add validate\_certs
\, ca\_path
\, and ciphers
options to configure TLS/SSL \([https\://github\.com/ansible\-collections/community\.general/issues/3686](https\://github\.com/ansible\-collections/community\.general/issues/3686)\, [https\://github\.com/ansible\-collections/community\.general/pull/9964](https\://github\.com/ansible\-collections/community\.general/pull/9964)\)\.
-* ilo\_redfish\_command\, ilo\_redfish\_config\, ilo\_redfish\_info \- add validate\_certs
\, ca\_path
\, and ciphers
options to configure TLS/SSL \([https\://github\.com/ansible\-collections/community\.general/issues/3686](https\://github\.com/ansible\-collections/community\.general/issues/3686)\, [https\://github\.com/ansible\-collections/community\.general/pull/9964](https\://github\.com/ansible\-collections/community\.general/pull/9964)\)\.
-* keycloak module\_utils \- user groups can now be referenced by their name\, like staff
\, or their path\, like /staff/engineering
\. The path syntax allows users to reference subgroups\, which is not possible otherwise \([https\://github\.com/ansible\-collections/community\.general/pull/9898](https\://github\.com/ansible\-collections/community\.general/pull/9898)\)\.
-* keycloak\_user module \- user groups can now be referenced by their name\, like staff
\, or their path\, like /staff/engineering
\. The path syntax allows users to reference subgroups\, which is not possible otherwise \([https\://github\.com/ansible\-collections/community\.general/pull/9898](https\://github\.com/ansible\-collections/community\.general/pull/9898)\)\.
-* nmcli \- add support for Infiniband MAC setting when type
is infiniband
\([https\://github\.com/ansible\-collections/community\.general/pull/9962](https\://github\.com/ansible\-collections/community\.general/pull/9962)\)\.
-* one\_vm \- update allowed values for updateconf
to include new parameters as per the latest OpenNebula API documentation\.
- Added parameters\:
-
- - OS
\: FIRMWARE
\;
- - CPU\_MODEL
\: MODEL
\, FEATURES
\;
- - FEATURES
\: VIRTIO\_BLK\_QUEUES
\, VIRTIO\_SCSI\_QUEUES
\, IOTHREADS
\;
- - GRAPHICS
\: PORT
\, COMMAND
\;
- - VIDEO
\: ATS
\, IOMMU
\, RESOLUTION
\, TYPE
\, VRAM
\;
- - RAW
\: VALIDATE
\;
- - BACKUP\_CONFIG
\: FS\_FREEZE
\, KEEP\_LAST
\, BACKUP\_VOLATILE
\, MODE
\, INCREMENT\_MODE
\.
-
- \([https\://github\.com/ansible\-collections/community\.general/pull/9959](https\://github\.com/ansible\-collections/community\.general/pull/9959)\)\.
-* proxmox and proxmox\_kvm modules \- allow uppercase characters in VM/container tags \([https\://github\.com/ansible\-collections/community\.general/issues/9895](https\://github\.com/ansible\-collections/community\.general/issues/9895)\, [https\://github\.com/ansible\-collections/community\.general/pull/10024](https\://github\.com/ansible\-collections/community\.general/pull/10024)\)\.
-* puppet \- improve parameter formatting\, no impact to user \([https\://github\.com/ansible\-collections/community\.general/pull/10014](https\://github\.com/ansible\-collections/community\.general/pull/10014)\)\.
-* redfish module utils \- add REDFISH\_COMMON\_ARGUMENT\_SPEC
\, a corresponding redfish
docs fragment\, and support for its validate\_certs
\, ca\_path
\, and ciphers
options \([https\://github\.com/ansible\-collections/community\.general/issues/3686](https\://github\.com/ansible\-collections/community\.general/issues/3686)\, [https\://github\.com/ansible\-collections/community\.general/pull/9964](https\://github\.com/ansible\-collections/community\.general/pull/9964)\)\.
-* redfish\_command\, redfish\_config\, redfish\_info \- add validate\_certs
and ca\_path
options to configure TLS/SSL \([https\://github\.com/ansible\-collections/community\.general/issues/3686](https\://github\.com/ansible\-collections/community\.general/issues/3686)\, [https\://github\.com/ansible\-collections/community\.general/pull/9964](https\://github\.com/ansible\-collections/community\.general/pull/9964)\)\.
-* rocketchat \- fix duplicate JSON conversion for Rocket\.Chat \< 7\.4\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9965](https\://github\.com/ansible\-collections/community\.general/pull/9965)\)\.
-* wdc\_redfish\_command\, wdc\_redfish\_info \- add validate\_certs
\, ca\_path
\, and ciphers
options to configure TLS/SSL \([https\://github\.com/ansible\-collections/community\.general/issues/3686](https\://github\.com/ansible\-collections/community\.general/issues/3686)\, [https\://github\.com/ansible\-collections/community\.general/pull/9964](https\://github\.com/ansible\-collections/community\.general/pull/9964)\)\.
-* xcc\_redfish\_command \- add validate\_certs
\, ca\_path
\, and ciphers
options to configure TLS/SSL \([https\://github\.com/ansible\-collections/community\.general/issues/3686](https\://github\.com/ansible\-collections/community\.general/issues/3686)\, [https\://github\.com/ansible\-collections/community\.general/pull/9964](https\://github\.com/ansible\-collections/community\.general/pull/9964)\)\.
-* zypper \- adds skip\_post\_errors
that allows to skip RPM post\-install errors \(Zypper return code 107\) \([https\://github\.com/ansible\-collections/community\.general/issues/9972](https\://github\.com/ansible\-collections/community\.general/issues/9972)\)\.
-
-
-### Deprecated Features
-
-* manifold lookup plugin \- plugin is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/10028](https\://github\.com/ansible\-collections/community\.general/pull/10028)\)\.
-* stackpath\_compute inventory plugin \- plugin is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/10026](https\://github\.com/ansible\-collections/community\.general/pull/10026)\)\.
-
-
-### Bugfixes
-
-* dependent look plugin \- make compatible with ansible\-core\'s Data Tagging feature \([https\://github\.com/ansible\-collections/community\.general/pull/9833](https\://github\.com/ansible\-collections/community\.general/pull/9833)\)\.
-* diy callback plugin \- make compatible with ansible\-core\'s Data Tagging feature \([https\://github\.com/ansible\-collections/community\.general/pull/9833](https\://github\.com/ansible\-collections/community\.general/pull/9833)\)\.
-* github\_deploy\_key \- check that key really exists on 422 to avoid masking other errors \([https\://github\.com/ansible\-collections/community\.general/issues/6718](https\://github\.com/ansible\-collections/community\.general/issues/6718)\, [https\://github\.com/ansible\-collections/community\.general/pull/10011](https\://github\.com/ansible\-collections/community\.general/pull/10011)\)\.
-* hashids and unicode\_normalize filter plugins \- avoid deprecated AnsibleFilterTypeError
on ansible\-core 2\.19 \([https\://github\.com/ansible\-collections/community\.general/pull/9992](https\://github\.com/ansible\-collections/community\.general/pull/9992)\)\.
-* homebrew \- emit a useful error message if brew info
reports a package tap is null
\([https\://github\.com/ansible\-collections/community\.general/pull/10013](https\://github\.com/ansible\-collections/community\.general/pull/10013)\, [https\://github\.com/ansible\-collections/community\.general/issues/10012](https\://github\.com/ansible\-collections/community\.general/issues/10012)\)\.
-* java\_cert \- the module no longer fails if the optional parameters pkcs12\_alias
and cert\_alias
are not provided \([https\://github\.com/ansible\-collections/community\.general/pull/9970](https\://github\.com/ansible\-collections/community\.general/pull/9970)\)\.
-* keycloak\_authentication \- fix authentification config duplication for Keycloak \< 26\.2\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9987](https\://github\.com/ansible\-collections/community\.general/pull/9987)\)\.
-* keycloak\_client \- fix the idempotency regression by normalizing the Keycloak response for after\_client
\([https\://github\.com/ansible\-collections/community\.general/issues/9905](https\://github\.com/ansible\-collections/community\.general/issues/9905)\, [https\://github\.com/ansible\-collections/community\.general/pull/9976](https\://github\.com/ansible\-collections/community\.general/pull/9976)\)\.
-* proxmox inventory plugin \- fix ansible\_host
staying empty for certain Proxmox nodes \([https\://github\.com/ansible\-collections/community\.general/issues/5906](https\://github\.com/ansible\-collections/community\.general/issues/5906)\, [https\://github\.com/ansible\-collections/community\.general/pull/9952](https\://github\.com/ansible\-collections/community\.general/pull/9952)\)\.
-* proxmox\_disk \- fail gracefully if storage
is required but not provided by the user \([https\://github\.com/ansible\-collections/community\.general/issues/9941](https\://github\.com/ansible\-collections/community\.general/issues/9941)\, [https\://github\.com/ansible\-collections/community\.general/pull/9963](https\://github\.com/ansible\-collections/community\.general/pull/9963)\)\.
-* reveal\_ansible\_type filter plugin and ansible\_type test plugin \- make compatible with ansible\-core\'s Data Tagging feature \([https\://github\.com/ansible\-collections/community\.general/pull/9833](https\://github\.com/ansible\-collections/community\.general/pull/9833)\)\.
-* sysrc \- no longer always reporting changed\=true
when state\=absent
\. This fixes the method exists\(\)
\([https\://github\.com/ansible\-collections/community\.general/issues/10004](https\://github\.com/ansible\-collections/community\.general/issues/10004)\, [https\://github\.com/ansible\-collections/community\.general/pull/10005](https\://github\.com/ansible\-collections/community\.general/pull/10005)\)\.
-* yaml callback plugin \- use ansible\-core internals to avoid breakage with Data Tagging \([https\://github\.com/ansible\-collections/community\.general/pull/9833](https\://github\.com/ansible\-collections/community\.general/pull/9833)\)\.
-
-
-### Known Issues
-
-* reveal\_ansible\_type filter plugin and ansible\_type test plugin \- note that ansible\-core\'s Data Tagging feature implements new aliases\, such as \_AnsibleTaggedStr
for str
\, \_AnsibleTaggedInt
for int
\, and \_AnsibleTaggedFloat
for float
\([https\://github\.com/ansible\-collections/community\.general/pull/9833](https\://github\.com/ansible\-collections/community\.general/pull/9833)\)\.
-
-
-### New Plugins
-
-
-#### Connection
-
-* community\.general\.wsl \- Run tasks in WSL distribution using wsl\.exe CLI via SSH\.
-
-
-## v10\.5\.0
-
-
-### Release Summary
-
-Regular bugfix and feature release\.
-
-
-### Minor Changes
-
-* CmdRunner module utils \- the convenience method cmd\_runner\_fmt\.as\_fixed\(\)
now accepts multiple arguments as a list \([https\://github\.com/ansible\-collections/community\.general/pull/9893](https\://github\.com/ansible\-collections/community\.general/pull/9893)\)\.
-* apache2\_mod\_proxy \- code simplification\, no change in functionality \([https\://github\.com/ansible\-collections/community\.general/pull/9457](https\://github\.com/ansible\-collections/community\.general/pull/9457)\)\.
-* consul\_token \- fix idempotency when policies
or roles
are supplied by name \([https\://github\.com/ansible\-collections/community\.general/issues/9841](https\://github\.com/ansible\-collections/community\.general/issues/9841)\, [https\://github\.com/ansible\-collections/community\.general/pull/9845](https\://github\.com/ansible\-collections/community\.general/pull/9845)\)\.
-* keycloak\_realm \- remove ID requirement when creating a realm to allow Keycloak generating its own realm ID \([https\://github\.com/ansible\-collections/community\.general/pull/9768](https\://github\.com/ansible\-collections/community\.general/pull/9768)\)\.
-* nmap inventory plugin \- adds dns\_servers
option for specifying DNS servers for name resolution\. Accepts hostnames or IP addresses in the same format as the exclude
option \([https\://github\.com/ansible\-collections/community\.general/pull/9849](https\://github\.com/ansible\-collections/community\.general/pull/9849)\)\.
-* proxmox\_kvm \- add missing audio hardware device handling \([https\://github\.com/ansible\-collections/community\.general/issues/5192](https\://github\.com/ansible\-collections/community\.general/issues/5192)\, [https\://github\.com/ansible\-collections/community\.general/pull/9847](https\://github\.com/ansible\-collections/community\.general/pull/9847)\)\.
-* redfish\_config \- add command SetPowerRestorePolicy
to set the desired power state of the system when power is restored \([https\://github\.com/ansible\-collections/community\.general/pull/9837](https\://github\.com/ansible\-collections/community\.general/pull/9837)\)\.
-* redfish\_info \- add command GetPowerRestorePolicy
to get the desired power state of the system when power is restored \([https\://github\.com/ansible\-collections/community\.general/pull/9824](https\://github\.com/ansible\-collections/community\.general/pull/9824)\)\.
-* rocketchat \- option is\_pre740
has been added to control the format of the payload\. For Rocket\.Chat 7\.4\.0 or newer\, it must be set to false
\([https\://github\.com/ansible\-collections/community\.general/pull/9882](https\://github\.com/ansible\-collections/community\.general/pull/9882)\)\.
-* slack callback plugin \- add http\_agent
option to enable the user to set a custom user agent for slack callback plugin \([https\://github\.com/ansible\-collections/community\.general/issues/9813](https\://github\.com/ansible\-collections/community\.general/issues/9813)\, [https\://github\.com/ansible\-collections/community\.general/pull/9836](https\://github\.com/ansible\-collections/community\.general/pull/9836)\)\.
-* systemd\_info \- add wildcard expression support in unitname
option \([https\://github\.com/ansible\-collections/community\.general/pull/9821](https\://github\.com/ansible\-collections/community\.general/pull/9821)\)\.
-* systemd\_info \- extend support to timer units \([https\://github\.com/ansible\-collections/community\.general/pull/9891](https\://github\.com/ansible\-collections/community\.general/pull/9891)\)\.
-* vmadm \- add new options flexible\_disk\_size
and owner\_uuid
\([https\://github\.com/ansible\-collections/community\.general/pull/9892](https\://github\.com/ansible\-collections/community\.general/pull/9892)\)\.
-
-
-### Bugfixes
-
-* cloudflare\_dns \- handle exhausted response stream in case of HTTP errors to show nice error message to the user \([https\://github\.com/ansible\-collections/community\.general/issues/9782](https\://github\.com/ansible\-collections/community\.general/issues/9782)\, [https\://github\.com/ansible\-collections/community\.general/pull/9818](https\://github\.com/ansible\-collections/community\.general/pull/9818)\)\.
-* dnf\_versionlock \- add support for dnf5 \([https\://github\.com/ansible\-collections/community\.general/issues/9556](https\://github\.com/ansible\-collections/community\.general/issues/9556)\)\.
-* homebrew \- fix crash when package names include tap \([https\://github\.com/ansible\-collections/community\.general/issues/9777](https\://github\.com/ansible\-collections/community\.general/issues/9777)\, [https\://github\.com/ansible\-collections/community\.general/pull/9803](https\://github\.com/ansible\-collections/community\.general/pull/9803)\)\.
-* homebrew\_cask \- handle unusual brew version strings \([https\://github\.com/ansible\-collections/community\.general/issues/8432](https\://github\.com/ansible\-collections/community\.general/issues/8432)\, [https\://github\.com/ansible\-collections/community\.general/pull/9881](https\://github\.com/ansible\-collections/community\.general/pull/9881)\)\.
-* nmcli \- enable changing only the order of DNS servers or search suffixes \([https\://github\.com/ansible\-collections/community\.general/issues/8724](https\://github\.com/ansible\-collections/community\.general/issues/8724)\, [https\://github\.com/ansible\-collections/community\.general/pull/9880](https\://github\.com/ansible\-collections/community\.general/pull/9880)\)\.
-* proxmox \- add missing key selection of \'status\'
key to get\_lxc\_status
\([https\://github\.com/ansible\-collections/community\.general/issues/9696](https\://github\.com/ansible\-collections/community\.general/issues/9696)\, [https\://github\.com/ansible\-collections/community\.general/pull/9809](https\://github\.com/ansible\-collections/community\.general/pull/9809)\)\.
-* proxmox\_vm\_info \- the module no longer expects that the key template
exists in a dictionary returned by Proxmox \([https\://github\.com/ansible\-collections/community\.general/issues/9875](https\://github\.com/ansible\-collections/community\.general/issues/9875)\, [https\://github\.com/ansible\-collections/community\.general/pull/9910](https\://github\.com/ansible\-collections/community\.general/pull/9910)\)\.
-* sudoers \- display stdout and stderr raised while failed validation \([https\://github\.com/ansible\-collections/community\.general/issues/9674](https\://github\.com/ansible\-collections/community\.general/issues/9674)\, [https\://github\.com/ansible\-collections/community\.general/pull/9871](https\://github\.com/ansible\-collections/community\.general/pull/9871)\)\.
-
-
-### New Modules
-
-* community\.general\.pacemaker\_resource \- Manage pacemaker resources\.
-
-
-## v10\.4\.0
-
-
-### Release Summary
-
-Regular bugfix and feature release\.
-
-
-### Minor Changes
-
-* bitwarden lookup plugin \- add new option collection\_name
to filter results by collection name\, and new option result\_count
to validate number of results \([https\://github\.com/ansible\-collections/community\.general/pull/9728](https\://github\.com/ansible\-collections/community\.general/pull/9728)\)\.
-* incus connection plugin \- adds remote\_user
and incus\_become\_method
parameters for allowing a non\-root user to connect to an Incus instance \([https\://github\.com/ansible\-collections/community\.general/pull/9743](https\://github\.com/ansible\-collections/community\.general/pull/9743)\)\.
-* iocage inventory plugin \- the new parameter hooks\_results
of the plugin is a list of files inside a jail that provide configuration parameters for the inventory\. The inventory plugin reads the files from the jails and put the contents into the items of created variable iocage\_hooks
\([https\://github\.com/ansible\-collections/community\.general/issues/9650](https\://github\.com/ansible\-collections/community\.general/issues/9650)\, [https\://github\.com/ansible\-collections/community\.general/pull/9651](https\://github\.com/ansible\-collections/community\.general/pull/9651)\)\.
-* jira \- adds client\_cert
and client\_key
parameters for supporting client certificate authentication when connecting to Jira \([https\://github\.com/ansible\-collections/community\.general/pull/9753](https\://github\.com/ansible\-collections/community\.general/pull/9753)\)\.
-* lldp \- adds multivalues
parameter to control behavior when lldpctl outputs an attribute multiple times \([https\://github\.com/ansible\-collections/community\.general/pull/9657](https\://github\.com/ansible\-collections/community\.general/pull/9657)\)\.
-* lvg \- add remove\_extra\_pvs
parameter to control if ansible should remove physical volumes which are not in the pvs
parameter \([https\://github\.com/ansible\-collections/community\.general/pull/9698](https\://github\.com/ansible\-collections/community\.general/pull/9698)\)\.
-* lxd connection plugin \- adds remote\_user
and lxd\_become\_method
parameters for allowing a non\-root user to connect to an LXD instance \([https\://github\.com/ansible\-collections/community\.general/pull/9659](https\://github\.com/ansible\-collections/community\.general/pull/9659)\)\.
-* nmcli \- adds VRF support with new type
value vrf
and new slave\_type
value vrf
as well as new table
parameter \([https\://github\.com/ansible\-collections/community\.general/pull/9658](https\://github\.com/ansible\-collections/community\.general/pull/9658)\, [https\://github\.com/ansible\-collections/community\.general/issues/8014](https\://github\.com/ansible\-collections/community\.general/issues/8014)\)\.
-* proxmox\_kvm \- allow hibernation and suspending of VMs \([https\://github\.com/ansible\-collections/community\.general/issues/9620](https\://github\.com/ansible\-collections/community\.general/issues/9620)\, [https\://github\.com/ansible\-collections/community\.general/pull/9653](https\://github\.com/ansible\-collections/community\.general/pull/9653)\)\.
-* redfish\_command \- add PowerFullPowerCycle
to power command options \([https\://github\.com/ansible\-collections/community\.general/pull/9729](https\://github\.com/ansible\-collections/community\.general/pull/9729)\)\.
-* ssh\_config \- add other\_options
option \([https\://github\.com/ansible\-collections/community\.general/issues/8053](https\://github\.com/ansible\-collections/community\.general/issues/8053)\, [https\://github\.com/ansible\-collections/community\.general/pull/9684](https\://github\.com/ansible\-collections/community\.general/pull/9684)\)\.
-* xen\_orchestra inventory plugin \- add use\_vm\_uuid
and use\_host\_uuid
boolean options to allow switching over to using VM/Xen name labels instead of UUIDs as item names \([https\://github\.com/ansible\-collections/community\.general/pull/9787](https\://github\.com/ansible\-collections/community\.general/pull/9787)\)\.
-
-
-### Deprecated Features
-
-* profitbricks \- module is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9733](https\://github\.com/ansible\-collections/community\.general/pull/9733)\)\.
-* profitbricks\_datacenter \- module is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9733](https\://github\.com/ansible\-collections/community\.general/pull/9733)\)\.
-* profitbricks\_nic \- module is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9733](https\://github\.com/ansible\-collections/community\.general/pull/9733)\)\.
-* profitbricks\_volume \- module is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9733](https\://github\.com/ansible\-collections/community\.general/pull/9733)\)\.
-* profitbricks\_volume\_attachments \- module is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9733](https\://github\.com/ansible\-collections/community\.general/pull/9733)\)\.
-
-
-### Bugfixes
-
-* apache2\_mod\_proxy \- make compatible with Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9762](https\://github\.com/ansible\-collections/community\.general/pull/9762)\)\.
-* apache2\_mod\_proxy \- passing the cluster\'s page as referer for the member\'s pages\. This makes the module actually work again for halfway modern Apache versions\. According to some comments founds on the net the referer was required since at least 2019 for some versions of Apache 2 \([https\://github\.com/ansible\-collections/community\.general/pull/9762](https\://github\.com/ansible\-collections/community\.general/pull/9762)\)\.
-* elasticsearch\_plugin \- fix ERROR\: D is not a recognized option
issue when configuring proxy settings \([https\://github\.com/ansible\-collections/community\.general/pull/9774](https\://github\.com/ansible\-collections/community\.general/pull/9774)\, [https\://github\.com/ansible\-collections/community\.general/issues/9773](https\://github\.com/ansible\-collections/community\.general/issues/9773)\)\.
-* ipa\_host \- module revoked existing host certificates even if user\_certificate
was not given \([https\://github\.com/ansible\-collections/community\.general/pull/9694](https\://github\.com/ansible\-collections/community\.general/pull/9694)\)\.
-* keycloak\_client \- in check mode\, detect whether the lists in before client \(for example redirect URI list\) contain items that the lists in the desired client do not contain \([https\://github\.com/ansible\-collections/community\.general/pull/9739](https\://github\.com/ansible\-collections/community\.general/pull/9739)\)\.
-* lldp \- fix crash caused by certain lldpctl output where an attribute is defined as branch and leaf \([https\://github\.com/ansible\-collections/community\.general/pull/9657](https\://github\.com/ansible\-collections/community\.general/pull/9657)\)\.
-* onepassword\_doc lookup plugin \- ensure that 1Password Connect support also works for this plugin \([https\://github\.com/ansible\-collections/community\.general/pull/9625](https\://github\.com/ansible\-collections/community\.general/pull/9625)\)\.
-* passwordstore lookup plugin \- fix subkey creation even when create\=false
\([https\://github\.com/ansible\-collections/community\.general/issues/9105](https\://github\.com/ansible\-collections/community\.general/issues/9105)\, [https\://github\.com/ansible\-collections/community\.general/pull/9106](https\://github\.com/ansible\-collections/community\.general/pull/9106)\)\.
-* proxmox inventory plugin \- plugin did not update cache correctly after meta\: refresh\_inventory
\([https\://github\.com/ansible\-collections/community\.general/issues/9710](https\://github\.com/ansible\-collections/community\.general/issues/9710)\, [https\://github\.com/ansible\-collections/community\.general/pull/9760](https\://github\.com/ansible\-collections/community\.general/pull/9760)\)\.
-* redhat\_subscription \- use the \"enable\_content\" option \(when available\) when
- registering using D\-Bus\, to ensure that subscription\-manager enables the
- content on registration\; this is particularly important on EL 10\+ and Fedora
- 41\+
- \([https\://github\.com/ansible\-collections/community\.general/pull/9778](https\://github\.com/ansible\-collections/community\.general/pull/9778)\)\.
-* zfs \- fix handling of multi\-line values of user\-defined ZFS properties \([https\://github\.com/ansible\-collections/community\.general/pull/6264](https\://github\.com/ansible\-collections/community\.general/pull/6264)\)\.
-* zfs\_facts \- parameter type
now accepts multiple values as documented \([https\://github\.com/ansible\-collections/community\.general/issues/5909](https\://github\.com/ansible\-collections/community\.general/issues/5909)\, [https\://github\.com/ansible\-collections/community\.general/pull/9697](https\://github\.com/ansible\-collections/community\.general/pull/9697)\)\.
-
-
-### New Modules
-
-* community\.general\.systemd\_info \- Gather C\(systemd\) unit info\.
-
-
-## v10\.3\.1
-
-
-### Release Summary
-
-Bugfix release\.
-
-
-### Minor Changes
-
-* onepassword\_ssh\_key \- refactor to move code to lookup class \([https\://github\.com/ansible\-collections/community\.general/pull/9633](https\://github\.com/ansible\-collections/community\.general/pull/9633)\)\.
-
-
-### Bugfixes
-
-* cloudflare\_dns \- fix crash when deleting a DNS record or when updating a record with solo\=true
\([https\://github\.com/ansible\-collections/community\.general/issues/9652](https\://github\.com/ansible\-collections/community\.general/issues/9652)\, [https\://github\.com/ansible\-collections/community\.general/pull/9649](https\://github\.com/ansible\-collections/community\.general/pull/9649)\)\.
-* homebrew \- make package name parsing more resilient \([https\://github\.com/ansible\-collections/community\.general/pull/9665](https\://github\.com/ansible\-collections/community\.general/pull/9665)\, [https\://github\.com/ansible\-collections/community\.general/issues/9641](https\://github\.com/ansible\-collections/community\.general/issues/9641)\)\.
-* keycloak module utils \- replaces missing return in get\_role\_composites method which caused it to return None instead of composite roles \([https\://github\.com/ansible\-collections/community\.general/issues/9678](https\://github\.com/ansible\-collections/community\.general/issues/9678)\, [https\://github\.com/ansible\-collections/community\.general/pull/9691](https\://github\.com/ansible\-collections/community\.general/pull/9691)\)\.
-* keycloak\_client \- fix and improve existing tests\. The module showed a diff without actual changes\, solved by improving the normalise\_cr\(\)
function \([https\://github\.com/ansible\-collections/community\.general/pull/9644](https\://github\.com/ansible\-collections/community\.general/pull/9644)\)\.
-* proxmox \- adds the pubkey
parameter \(back to\) the update
state \([https\://github\.com/ansible\-collections/community\.general/issues/9642](https\://github\.com/ansible\-collections/community\.general/issues/9642)\, [https\://github\.com/ansible\-collections/community\.general/pull/9645](https\://github\.com/ansible\-collections/community\.general/pull/9645)\)\.
-* proxmox \- fixes a typo in the translation of the pubkey
parameter to proxmox\' ssh\-public\-keys
\([https\://github\.com/ansible\-collections/community\.general/issues/9642](https\://github\.com/ansible\-collections/community\.general/issues/9642)\, [https\://github\.com/ansible\-collections/community\.general/pull/9645](https\://github\.com/ansible\-collections/community\.general/pull/9645)\)\.
-* xml \- ensure file descriptor is closed \([https\://github\.com/ansible\-collections/community\.general/pull/9695](https\://github\.com/ansible\-collections/community\.general/pull/9695)\)\.
-
-
-## v10\.3\.0
-
-
-### Release Summary
-
-Regular bugfix and feature release\.
-
-
-### Minor Changes
-
-* MH module utils \- delegate debug
to the underlying AnsibleModule
instance or issue a warning if an attribute already exists with that name \([https\://github\.com/ansible\-collections/community\.general/pull/9577](https\://github\.com/ansible\-collections/community\.general/pull/9577)\)\.
-* apache2\_mod\_proxy \- better handling regexp extraction \([https\://github\.com/ansible\-collections/community\.general/pull/9609](https\://github\.com/ansible\-collections/community\.general/pull/9609)\)\.
-* apache2\_mod\_proxy \- change type of state
to a list of strings\. No change for the users \([https\://github\.com/ansible\-collections/community\.general/pull/9600](https\://github\.com/ansible\-collections/community\.general/pull/9600)\)\.
-* apache2\_mod\_proxy \- improve readability when using results from fetch\_url\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/9608](https\://github\.com/ansible\-collections/community\.general/pull/9608)\)\.
-* apache2\_mod\_proxy \- refactor repeated code into method \([https\://github\.com/ansible\-collections/community\.general/pull/9599](https\://github\.com/ansible\-collections/community\.general/pull/9599)\)\.
-* apache2\_mod\_proxy \- remove unused parameter and code from Balancer
constructor \([https\://github\.com/ansible\-collections/community\.general/pull/9614](https\://github\.com/ansible\-collections/community\.general/pull/9614)\)\.
-* apache2\_mod\_proxy \- simplified and improved string manipulation \([https\://github\.com/ansible\-collections/community\.general/pull/9614](https\://github\.com/ansible\-collections/community\.general/pull/9614)\)\.
-* apache2\_mod\_proxy \- use deps
to handle dependencies \([https\://github\.com/ansible\-collections/community\.general/pull/9612](https\://github\.com/ansible\-collections/community\.general/pull/9612)\)\.
-* cgroup\_memory\_recap callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* chroot connection plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* cloud\_init\_data\_facts \- open file using open\(\)
as a context manager \([https\://github\.com/ansible\-collections/community\.general/pull/9579](https\://github\.com/ansible\-collections/community\.general/pull/9579)\)\.
-* cobbler inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* context\_demo callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* counter filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* counter\_enabled callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* cpanm \- enable usage of option \-\-with\-recommends
\([https\://github\.com/ansible\-collections/community\.general/issues/9554](https\://github\.com/ansible\-collections/community\.general/issues/9554)\, [https\://github\.com/ansible\-collections/community\.general/pull/9555](https\://github\.com/ansible\-collections/community\.general/pull/9555)\)\.
-* cpanm \- enable usage of option \-\-with\-suggests
\([https\://github\.com/ansible\-collections/community\.general/pull/9555](https\://github\.com/ansible\-collections/community\.general/pull/9555)\)\.
-* crc32 filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* cronvar \- open file using open\(\)
as a context manager \([https\://github\.com/ansible\-collections/community\.general/pull/9579](https\://github\.com/ansible\-collections/community\.general/pull/9579)\)\.
-* crypttab \- open file using open\(\)
as a context manager \([https\://github\.com/ansible\-collections/community\.general/pull/9579](https\://github\.com/ansible\-collections/community\.general/pull/9579)\)\.
-* default\_without\_diff callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* dense callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* dict filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* dict\_kv filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* diy callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* doas become plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* dzdo become plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* elastic callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* from\_csv filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* from\_ini filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* funcd connection plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* gitlab\_runners inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* groupby\_as\_dict filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* hashids filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* icinga2 inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* incus connection plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* iocage connection plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* iocage inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* iocage inventory plugin \- the new parameter sudo
of the plugin lets the command iocage list \-l
run as root on the iocage host\. This is needed to get the IPv4 of a running DHCP jail \([https\://github\.com/ansible\-collections/community\.general/issues/9572](https\://github\.com/ansible\-collections/community\.general/issues/9572)\, [https\://github\.com/ansible\-collections/community\.general/pull/9573](https\://github\.com/ansible\-collections/community\.general/pull/9573)\)\.
-* iptables\_state action plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* jabber callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* jail connection plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* jc filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* jira \- transition operation now has status\_id
to directly reference wanted transition \([https\://github\.com/ansible\-collections/community\.general/pull/9602](https\://github\.com/ansible\-collections/community\.general/pull/9602)\)\.
-* json\_query filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* keep\_keys filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* keycloak\_\* modules \- refresh\_token
parameter added\. When multiple authentication parameters are provided \(token
\, refresh\_token
\, and auth\_username
/auth\_password
\)\, modules will now automatically retry requests upon authentication errors \(401\)\, using in order the token\, refresh token\, and username/password \([https\://github\.com/ansible\-collections/community\.general/pull/9494](https\://github\.com/ansible\-collections/community\.general/pull/9494)\)\.
-* known\_hosts \- open file using open\(\)
as a context manager \([https\://github\.com/ansible\-collections/community\.general/pull/9579](https\://github\.com/ansible\-collections/community\.general/pull/9579)\)\.
-* ksu become plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* linode inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* lists filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* lists\_mergeby filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* log\_plays callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* loganalytics callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* logdna callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* logentries callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* logstash callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* lxc connection plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* lxd connection plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* lxd inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* machinectl become plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* mail callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* memcached cache plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* nmap inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* nmcli \- add an option fail\_over\_mac
\([https\://github\.com/ansible\-collections/community\.general/issues/9570](https\://github\.com/ansible\-collections/community\.general/issues/9570)\, [https\://github\.com/ansible\-collections/community\.general/pull/9571](https\://github\.com/ansible\-collections/community\.general/pull/9571)\)\.
-* nrdp callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* null callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* one\_template \- adds filter
option for retrieving templates which are not owned by the user \([https\://github\.com/ansible\-collections/community\.general/pull/9547](https\://github\.com/ansible\-collections/community\.general/pull/9547)\, [https\://github\.com/ansible\-collections/community\.general/issues/9278](https\://github\.com/ansible\-collections/community\.general/issues/9278)\)\.
-* online inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* opennebula inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* opentelemetry callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* parted \- open file using open\(\)
as a context manager \([https\://github\.com/ansible\-collections/community\.general/pull/9579](https\://github\.com/ansible\-collections/community\.general/pull/9579)\)\.
-* pbrun become plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* pfexec become plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* pickle cache plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* pmrun become plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* proxmox \- refactors the proxmox module \([https\://github\.com/ansible\-collections/community\.general/pull/9225](https\://github\.com/ansible\-collections/community\.general/pull/9225)\)\.
-* proxmox inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* proxmox\_pct\_remote connection plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* proxmox\_template \- add support for checksum validation with new options checksum\_algorithm
and checksum
\([https\://github\.com/ansible\-collections/community\.general/issues/9553](https\://github\.com/ansible\-collections/community\.general/issues/9553)\, [https\://github\.com/ansible\-collections/community\.general/pull/9601](https\://github\.com/ansible\-collections/community\.general/pull/9601)\)\.
-* pulp\_repo \- open file using open\(\)
as a context manager \([https\://github\.com/ansible\-collections/community\.general/pull/9579](https\://github\.com/ansible\-collections/community\.general/pull/9579)\)\.
-* qubes connection plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* random\_mac filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* redfish\_info \- add command GetAccountServiceConfig
to get full information about AccountService configuration \([https\://github\.com/ansible\-collections/community\.general/pull/9403](https\://github\.com/ansible\-collections/community\.general/pull/9403)\)\.
-* redhat\_subscription \- open file using open\(\)
as a context manager \([https\://github\.com/ansible\-collections/community\.general/pull/9579](https\://github\.com/ansible\-collections/community\.general/pull/9579)\)\.
-* redis cache plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* remove\_keys filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* replace\_keys filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* reveal\_ansible\_type filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* run0 become plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* saltstack connection plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* say callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* scaleway inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* selective callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* sesu become plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* shutdown action plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* slack callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* snap \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9598](https\://github\.com/ansible\-collections/community\.general/pull/9598)\)\.
-* snap\_alias \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9598](https\://github\.com/ansible\-collections/community\.general/pull/9598)\)\.
-* solaris\_zone \- open file using open\(\)
as a context manager \([https\://github\.com/ansible\-collections/community\.general/pull/9579](https\://github\.com/ansible\-collections/community\.general/pull/9579)\)\.
-* sorcery \- open file using open\(\)
as a context manager \([https\://github\.com/ansible\-collections/community\.general/pull/9579](https\://github\.com/ansible\-collections/community\.general/pull/9579)\)\.
-* splunk callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* stackpath\_compute inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* sudosu become plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* sumologic callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* syslog\_json callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* time filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* timestamp callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* timezone \- open file using open\(\)
as a context manager \([https\://github\.com/ansible\-collections/community\.general/pull/9579](https\://github\.com/ansible\-collections/community\.general/pull/9579)\)\.
-* to\_ini filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* ufw \- add support for vrrp
protocol \([https\://github\.com/ansible\-collections/community\.general/issues/9562](https\://github\.com/ansible\-collections/community\.general/issues/9562)\, [https\://github\.com/ansible\-collections/community\.general/pull/9582](https\://github\.com/ansible\-collections/community\.general/pull/9582)\)\.
-* unicode\_normalize filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* unixy callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* version\_sort filter plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9585](https\://github\.com/ansible\-collections/community\.general/pull/9585)\)\.
-* virtualbox inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* xen\_orchestra inventory plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-* yaml cache plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* yaml callback plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9583](https\://github\.com/ansible\-collections/community\.general/pull/9583)\)\.
-* zone connection plugin \- adjust standard preamble for Python 3 \([https\://github\.com/ansible\-collections/community\.general/pull/9584](https\://github\.com/ansible\-collections/community\.general/pull/9584)\)\.
-
-
-### Deprecated Features
-
-* MH module utils \- attribute debug
definition in subclasses of MH is now deprecated\, as that name will become a delegation to AnsibleModule
in community\.general 12\.0\.0\, and any such attribute will be overridden by that delegation in that version \([https\://github\.com/ansible\-collections/community\.general/pull/9577](https\://github\.com/ansible\-collections/community\.general/pull/9577)\)\.
-* proxmox \- removes default value false
of update
parameter\. This will be changed to a default of true
in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9225](https\://github\.com/ansible\-collections/community\.general/pull/9225)\)\.
-
-
-### Security Fixes
-
-* keycloak\_client \- Sanitize saml\.encryption\.private\.key
so it does not show in the logs \([https\://github\.com/ansible\-collections/community\.general/pull/9621](https\://github\.com/ansible\-collections/community\.general/pull/9621)\)\.
-
-
-### Bugfixes
-
-* homebrew \- fix incorrect handling of homebrew modules when a tap is requested \([https\://github\.com/ansible\-collections/community\.general/pull/9546](https\://github\.com/ansible\-collections/community\.general/pull/9546)\, [https\://github\.com/ansible\-collections/community\.general/issues/9533](https\://github\.com/ansible\-collections/community\.general/issues/9533)\)\.
-* iocage inventory plugin \- the plugin parses the IP4 tab of the jails list and put the elements into the new variable iocage\_ip4\_dict
\. In multiple interface format the variable iocage\_ip4
keeps the comma\-separated list of IP4 \([https\://github\.com/ansible\-collections/community\.general/issues/9538](https\://github\.com/ansible\-collections/community\.general/issues/9538)\)\.
-* pipx \- honor option global
when state\=latest
\([https\://github\.com/ansible\-collections/community\.general/pull/9623](https\://github\.com/ansible\-collections/community\.general/pull/9623)\)\.
-* proxmox \- fixes idempotency of template conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9225](https\://github\.com/ansible\-collections/community\.general/pull/9225)\, [https\://github\.com/ansible\-collections/community\.general/issues/8811](https\://github\.com/ansible\-collections/community\.general/issues/8811)\)\.
-* proxmox \- fixes incorrect parsing for bind\-only mounts \([https\://github\.com/ansible\-collections/community\.general/pull/9225](https\://github\.com/ansible\-collections/community\.general/pull/9225)\, [https\://github\.com/ansible\-collections/community\.general/issues/8982](https\://github\.com/ansible\-collections/community\.general/issues/8982)\)\.
-* proxmox \- fixes issues with disk\_volume variable \([https\://github\.com/ansible\-collections/community\.general/pull/9225](https\://github\.com/ansible\-collections/community\.general/pull/9225)\, [https\://github\.com/ansible\-collections/community\.general/issues/9065](https\://github\.com/ansible\-collections/community\.general/issues/9065)\)\.
-* proxmox module utils \- fixes ignoring of choose\_first\_if\_multiple
argument in get\_vmid
\([https\://github\.com/ansible\-collections/community\.general/pull/9225](https\://github\.com/ansible\-collections/community\.general/pull/9225)\)\.
-* redhat\_subscription \- do not try to unsubscribe \(i\.e\. remove subscriptions\)
- when unregistering a system\: newer versions of subscription\-manager\, as
- available in EL 10 and Fedora 41\+\, do not support entitlements anymore\, and
- thus unsubscribing will fail
- \([https\://github\.com/ansible\-collections/community\.general/pull/9578](https\://github\.com/ansible\-collections/community\.general/pull/9578)\)\.
-
-
-### New Plugins
-
-
-#### Connection
-
-* community\.general\.proxmox\_pct\_remote \- Run tasks in Proxmox LXC container instances using pct CLI via SSH\.
-
-
-#### Filter
-
-* community\.general\.json\_diff \- Create a JSON patch by comparing two JSON files\.
-* community\.general\.json\_patch \- Apply a JSON\-Patch \(RFC 6902\) operation to an object\.
-* community\.general\.json\_patch\_recipe \- Apply JSON\-Patch \(RFC 6902\) operations to an object\.
-
-
-#### Lookup
-
-* community\.general\.onepassword\_ssh\_key \- Fetch SSH keys stored in 1Password\.
-
-
-### New Modules
-
-* community\.general\.proxmox\_backup\_info \- Retrieve information on Proxmox scheduled backups\.
-
-
-## v10\.2\.0
-
-
-### Release Summary
-
-Regular bugfix and feature release\.
-
-
-### Minor Changes
-
-* bitwarden lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* cgroup\_memory\_recap callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* chef\_databag lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* chroot connection plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* chroot connection plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\.
-* cobbler inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* cobbler inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* collection\_version lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* consul\_kv lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* context\_demo callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* counter\_enabled callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* credstash lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* cyberarkpassword lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* cyberarkpassword lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* dense callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* dependent lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* dig lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* dig lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* diy callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* dnstxt lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* dnstxt lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* doas become plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\.
-* dsv lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* dzdo become plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\.
-* elastic callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* etcd lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* etcd3 lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* etcd3 lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* filetree lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* from\_csv filter plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* from\_ini filter plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* funcd connection plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\.
-* github\_app\_access\_token lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* gitlab\_instance\_variable \- add support for raw
variables suboption \([https\://github\.com/ansible\-collections/community\.general/pull/9425](https\://github\.com/ansible\-collections/community\.general/pull/9425)\)\.
-* gitlab\_runners inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* gitlab\_runners inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* hiera lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* icinga2 inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* incus connection plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\.
-* iocage connection plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\.
-* iocage inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* iptables\_state action plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9318](https\://github\.com/ansible\-collections/community\.general/pull/9318)\)\.
-* jabber callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* jail connection plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\.
-* keycloak \- add an action group for Keycloak modules to allow module\_defaults
to be set for Keycloak tasks \([https\://github\.com/ansible\-collections/community\.general/pull/9284](https\://github\.com/ansible\-collections/community\.general/pull/9284)\)\.
-* keyring lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* ksu become plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\.
-* lastpass lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* linode inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* lmdb\_kv lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* lmdb\_kv lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* locale\_gen \- invert the logic to determine ubuntu\_mode
\, making it look first for /etc/locale\.gen
\(set ubuntu\_mode
to False
\) and only then looking for /var/lib/locales/supported\.d/
\(set ubuntu\_mode
to True
\) \([https\://github\.com/ansible\-collections/community\.general/pull/9238](https\://github\.com/ansible\-collections/community\.general/pull/9238)\, [https\://github\.com/ansible\-collections/community\.general/issues/9131](https\://github\.com/ansible\-collections/community\.general/issues/9131)\, [https\://github\.com/ansible\-collections/community\.general/issues/8487](https\://github\.com/ansible\-collections/community\.general/issues/8487)\)\.
-* locale\_gen \- new return value mechanism
to better express the semantics of the ubuntu\_mode
\, with the possible values being either glibc
\(ubuntu\_mode\=False
\) or ubuntu\_legacy
\(ubuntu\_mode\=True
\) \([https\://github\.com/ansible\-collections/community\.general/pull/9238](https\://github\.com/ansible\-collections/community\.general/pull/9238)\)\.
-* log\_plays callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* loganalytics callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* logdna callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* logentries callback plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* logentries callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* lxc connection plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\.
-* lxd connection plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\.
-* lxd inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* lxd inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* machinectl become plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\.
-* mail callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* manageiq\_alert\_profiles \- improve handling of parameter requirements \([https\://github\.com/ansible\-collections/community\.general/pull/9449](https\://github\.com/ansible\-collections/community\.general/pull/9449)\)\.
-* manifold lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* manifold lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* memcached cache plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9320](https\://github\.com/ansible\-collections/community\.general/pull/9320)\)\.
-* merge\_variables lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* nmap inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* nmap inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* nrdp callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* onepassword lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* onepassword lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* onepassword\_doc lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* online inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* opennebula inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* opennebula inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* opentelemetry callback plugin \- remove code handling Python versions prior to 3\.7 \([https\://github\.com/ansible\-collections/community\.general/pull/9482](https\://github\.com/ansible\-collections/community\.general/pull/9482)\)\.
-* opentelemetry callback plugin \- remove code handling Python versions prior to 3\.7 \([https\://github\.com/ansible\-collections/community\.general/pull/9503](https\://github\.com/ansible\-collections/community\.general/pull/9503)\)\.
-* opentelemetry callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* pacemaker\_cluster \- remove unused code \([https\://github\.com/ansible\-collections/community\.general/pull/9471](https\://github\.com/ansible\-collections/community\.general/pull/9471)\)\.
-* pacemaker\_cluster \- using safer mechanism to run external command \([https\://github\.com/ansible\-collections/community\.general/pull/9471](https\://github\.com/ansible\-collections/community\.general/pull/9471)\)\.
-* passwordstore lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* pbrun become plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\.
-* pfexec become plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\.
-* pmrun become plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\.
-* proxmox inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* proxmox inventory plugin \- strip whitespace from user
\, token\_id
\, and token\_secret
\([https\://github\.com/ansible\-collections/community\.general/issues/9227](https\://github\.com/ansible\-collections/community\.general/issues/9227)\, [https\://github\.com/ansible\-collections/community\.general/pull/9228/](https\://github\.com/ansible\-collections/community\.general/pull/9228/)\)\.
-* proxmox inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* proxmox module utils \- add method api\_task\_complete
that can wait for task completion and return error message \([https\://github\.com/ansible\-collections/community\.general/pull/9256](https\://github\.com/ansible\-collections/community\.general/pull/9256)\)\.
-* proxmox\_backup \- refactor permission checking to improve code readability and maintainability \([https\://github\.com/ansible\-collections/community\.general/pull/9239](https\://github\.com/ansible\-collections/community\.general/pull/9239)\)\.
-* qubes connection plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\.
-* random\_pet lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* redis cache plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* redis cache plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9320](https\://github\.com/ansible\-collections/community\.general/pull/9320)\)\.
-* redis lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* revbitspss lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* saltstack connection plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\.
-* say callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* scaleway inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* scaleway inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* selective callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* sesu become plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\.
-* shelvefile lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* shutdown action plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* shutdown action plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9318](https\://github\.com/ansible\-collections/community\.general/pull/9318)\)\.
-* slack callback plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* slack callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* splunk callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* stackpath\_compute inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* sudosu become plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9319](https\://github\.com/ansible\-collections/community\.general/pull/9319)\)\.
-* timestamp callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* to\_ini filter plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* tss lookup plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* tss lookup plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9324](https\://github\.com/ansible\-collections/community\.general/pull/9324)\)\.
-* unixy callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* virtualbox inventory plugin \- clean up string conversions \([https\://github\.com/ansible\-collections/community\.general/pull/9379](https\://github\.com/ansible\-collections/community\.general/pull/9379)\)\.
-* virtualbox inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* xbps \- add root
and repository
options to enable bootstrapping new Void installations \([https\://github\.com/ansible\-collections/community\.general/pull/9174](https\://github\.com/ansible\-collections/community\.general/pull/9174)\)\.
-* xen\_orchestra inventory plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9323](https\://github\.com/ansible\-collections/community\.general/pull/9323)\)\.
-* xfconf \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9226](https\://github\.com/ansible\-collections/community\.general/pull/9226)\)\.
-* xfconf\_info \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9226](https\://github\.com/ansible\-collections/community\.general/pull/9226)\)\.
-* yaml callback plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9321](https\://github\.com/ansible\-collections/community\.general/pull/9321)\)\.
-* zone connection plugin \- use f\-strings instead of interpolations or format
\([https\://github\.com/ansible\-collections/community\.general/pull/9322](https\://github\.com/ansible\-collections/community\.general/pull/9322)\)\.
-* zypper \- add quiet
option \([https\://github\.com/ansible\-collections/community\.general/pull/9270](https\://github\.com/ansible\-collections/community\.general/pull/9270)\)\.
-* zypper \- add simple\_errors
option \([https\://github\.com/ansible\-collections/community\.general/pull/9270](https\://github\.com/ansible\-collections/community\.general/pull/9270)\)\.
-
-
-### Deprecated Features
-
-* atomic\_container \- module is deprecated and will be removed in community\.general 13\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9487](https\://github\.com/ansible\-collections/community\.general/pull/9487)\)\.
-* atomic\_host \- module is deprecated and will be removed in community\.general 13\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9487](https\://github\.com/ansible\-collections/community\.general/pull/9487)\)\.
-* atomic\_image \- module is deprecated and will be removed in community\.general 13\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9487](https\://github\.com/ansible\-collections/community\.general/pull/9487)\)\.
-* facter \- module is deprecated and will be removed in community\.general 12\.0\.0\, use community\.general\.facter\_facts
instead \([https\://github\.com/ansible\-collections/community\.general/pull/9451](https\://github\.com/ansible\-collections/community\.general/pull/9451)\)\.
-* locale\_gen \- ubuntu\_mode\=True
\, or mechanism\=ubuntu\_legacy
is deprecated and will be removed in community\.general 13\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9238](https\://github\.com/ansible\-collections/community\.general/pull/9238)\)\.
-* pure module utils \- the module utils is deprecated and will be removed from community\.general 12\.0\.0\. The modules using this were removed in community\.general 3\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9432](https\://github\.com/ansible\-collections/community\.general/pull/9432)\)\.
-* purestorage doc fragments \- the doc fragment is deprecated and will be removed from community\.general 12\.0\.0\. The modules using this were removed in community\.general 3\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9432](https\://github\.com/ansible\-collections/community\.general/pull/9432)\)\.
-* sensu\_check \- module is deprecated and will be removed in community\.general 13\.0\.0\, use collection sensu\.sensu\_go
instead \([https\://github\.com/ansible\-collections/community\.general/pull/9483](https\://github\.com/ansible\-collections/community\.general/pull/9483)\)\.
-* sensu\_client \- module is deprecated and will be removed in community\.general 13\.0\.0\, use collection sensu\.sensu\_go
instead \([https\://github\.com/ansible\-collections/community\.general/pull/9483](https\://github\.com/ansible\-collections/community\.general/pull/9483)\)\.
-* sensu\_handler \- module is deprecated and will be removed in community\.general 13\.0\.0\, use collection sensu\.sensu\_go
instead \([https\://github\.com/ansible\-collections/community\.general/pull/9483](https\://github\.com/ansible\-collections/community\.general/pull/9483)\)\.
-* sensu\_silence \- module is deprecated and will be removed in community\.general 13\.0\.0\, use collection sensu\.sensu\_go
instead \([https\://github\.com/ansible\-collections/community\.general/pull/9483](https\://github\.com/ansible\-collections/community\.general/pull/9483)\)\.
-* sensu\_subscription \- module is deprecated and will be removed in community\.general 13\.0\.0\, use collection sensu\.sensu\_go
instead \([https\://github\.com/ansible\-collections/community\.general/pull/9483](https\://github\.com/ansible\-collections/community\.general/pull/9483)\)\.
-* slack \- the default value auto
of the prepend\_hash
option is deprecated and will change to never
in community\.general 12\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/9443](https\://github\.com/ansible\-collections/community\.general/pull/9443)\)\.
-* yaml callback plugin \- deprecate plugin in favor of result\_format\=yaml
in plugin ansible\.builtin\.default
\([https\://github\.com/ansible\-collections/community\.general/pull/9456](https\://github\.com/ansible\-collections/community\.general/pull/9456)\)\.
-
-
-### Security Fixes
-
-* keycloak\_authentication \- API calls did not properly set the priority
during update resulting in incorrectly sorted authentication flows\. This apparently only affects Keycloak 25 or newer \([https\://github\.com/ansible\-collections/community\.general/pull/9263](https\://github\.com/ansible\-collections/community\.general/pull/9263)\)\.
-
-
-### Bugfixes
-
-* dig lookup plugin \- correctly handle NoNameserver
exception \([https\://github\.com/ansible\-collections/community\.general/pull/9363](https\://github\.com/ansible\-collections/community\.general/pull/9363)\, [https\://github\.com/ansible\-collections/community\.general/issues/9362](https\://github\.com/ansible\-collections/community\.general/issues/9362)\)\.
-* homebrew \- fix incorrect handling of aliased homebrew modules when the alias is requested \([https\://github\.com/ansible\-collections/community\.general/pull/9255](https\://github\.com/ansible\-collections/community\.general/pull/9255)\, [https\://github\.com/ansible\-collections/community\.general/issues/9240](https\://github\.com/ansible\-collections/community\.general/issues/9240)\)\.
-* htpasswd \- report changes when file permissions are adjusted \([https\://github\.com/ansible\-collections/community\.general/issues/9485](https\://github\.com/ansible\-collections/community\.general/issues/9485)\, [https\://github\.com/ansible\-collections/community\.general/pull/9490](https\://github\.com/ansible\-collections/community\.general/pull/9490)\)\.
-* proxmox\_backup \- fix incorrect key lookup in vmid permission check \([https\://github\.com/ansible\-collections/community\.general/pull/9223](https\://github\.com/ansible\-collections/community\.general/pull/9223)\)\.
-* proxmox\_disk \- fix async method and make resize\_disk
method handle errors correctly \([https\://github\.com/ansible\-collections/community\.general/pull/9256](https\://github\.com/ansible\-collections/community\.general/pull/9256)\)\.
-* proxmox\_template \- fix the wrong path called on proxmox\_template\.task\_status
\([https\://github\.com/ansible\-collections/community\.general/issues/9276](https\://github\.com/ansible\-collections/community\.general/issues/9276)\, [https\://github\.com/ansible\-collections/community\.general/pull/9277](https\://github\.com/ansible\-collections/community\.general/pull/9277)\)\.
-* qubes connection plugin \- fix the printing of debug information \([https\://github\.com/ansible\-collections/community\.general/pull/9334](https\://github\.com/ansible\-collections/community\.general/pull/9334)\)\.
-* redfish\_utils module utils \- Fix VerifyBiosAttributes
command on multi system resource nodes \([https\://github\.com/ansible\-collections/community\.general/pull/9234](https\://github\.com/ansible\-collections/community\.general/pull/9234)\)\.
-
-
-### New Plugins
-
-
-#### Inventory
-
-* community\.general\.iocage \- iocage inventory source\.
-
-
-### New Modules
-
-* community\.general\.android\_sdk \- Manages Android SDK packages\.
-* community\.general\.ldap\_inc \- Use the Modify\-Increment LDAP V3 feature to increment an attribute value\.
-* community\.general\.systemd\_creds\_decrypt \- C\(systemd\)\'s C\(systemd\-creds decrypt\) plugin\.
-* community\.general\.systemd\_creds\_encrypt \- C\(systemd\)\'s C\(systemd\-creds encrypt\) plugin\.
-
-
-## v10\.1\.0
-
-
-### Release Summary
-
-Regular bugfix and feature release\.
-
-
-### Minor Changes
-
-* alternatives \- add family
parameter that allows to utilize the \-\-family
option available in RedHat version of update\-alternatives \([https\://github\.com/ansible\-collections/community\.general/issues/5060](https\://github\.com/ansible\-collections/community\.general/issues/5060)\, [https\://github\.com/ansible\-collections/community\.general/pull/9096](https\://github\.com/ansible\-collections/community\.general/pull/9096)\)\.
-* cloudflare\_dns \- add support for comment
and tags
\([https\://github\.com/ansible\-collections/community\.general/pull/9132](https\://github\.com/ansible\-collections/community\.general/pull/9132)\)\.
-* deps module utils \- add deps\.clear\(\)
to clear out previously declared dependencies \([https\://github\.com/ansible\-collections/community\.general/pull/9179](https\://github\.com/ansible\-collections/community\.general/pull/9179)\)\.
-* homebrew \- greatly speed up module when multiple packages are passed in the name
option \([https\://github\.com/ansible\-collections/community\.general/pull/9181](https\://github\.com/ansible\-collections/community\.general/pull/9181)\)\.
-* homebrew \- remove duplicated package name validation \([https\://github\.com/ansible\-collections/community\.general/pull/9076](https\://github\.com/ansible\-collections/community\.general/pull/9076)\)\.
-* iso\_extract \- adds password
parameter that is passed to 7z \([https\://github\.com/ansible\-collections/community\.general/pull/9159](https\://github\.com/ansible\-collections/community\.general/pull/9159)\)\.
-* launchd \- add plist
option for services such as sshd\, where the plist filename doesn\'t match the service name \([https\://github\.com/ansible\-collections/community\.general/pull/9102](https\://github\.com/ansible\-collections/community\.general/pull/9102)\)\.
-* nmcli \- add sriov
parameter that enables support for SR\-IOV settings \([https\://github\.com/ansible\-collections/community\.general/pull/9168](https\://github\.com/ansible\-collections/community\.general/pull/9168)\)\.
-* pipx \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9180](https\://github\.com/ansible\-collections/community\.general/pull/9180)\)\.
-* pipx\_info \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9180](https\://github\.com/ansible\-collections/community\.general/pull/9180)\)\.
-* proxmox\_template \- add server side artifact fetching support \([https\://github\.com/ansible\-collections/community\.general/pull/9113](https\://github\.com/ansible\-collections/community\.general/pull/9113)\)\.
-* redfish\_command \- add update\_custom\_oem\_header
\, update\_custom\_oem\_params
\, and update\_custom\_oem\_mime\_type
options \([https\://github\.com/ansible\-collections/community\.general/pull/9123](https\://github\.com/ansible\-collections/community\.general/pull/9123)\)\.
-* redfish\_utils module utils \- remove redundant code \([https\://github\.com/ansible\-collections/community\.general/pull/9190](https\://github\.com/ansible\-collections/community\.general/pull/9190)\)\.
-* rpm\_ostree\_pkg \- added the options apply\_live
\([https\://github\.com/ansible\-collections/community\.general/pull/9167](https\://github\.com/ansible\-collections/community\.general/pull/9167)\)\.
-* rpm\_ostree\_pkg \- added the return value needs\_reboot
\([https\://github\.com/ansible\-collections/community\.general/pull/9167](https\://github\.com/ansible\-collections/community\.general/pull/9167)\)\.
-* scaleway\_lb \- minor simplification in the code \([https\://github\.com/ansible\-collections/community\.general/pull/9189](https\://github\.com/ansible\-collections/community\.general/pull/9189)\)\.
-* ssh\_config \- add dynamicforward
option \([https\://github\.com/ansible\-collections/community\.general/pull/9192](https\://github\.com/ansible\-collections/community\.general/pull/9192)\)\.
-
-
-### Deprecated Features
-
-* opkg \- deprecate value \"\"
for parameter force
\([https\://github\.com/ansible\-collections/community\.general/pull/9172](https\://github\.com/ansible\-collections/community\.general/pull/9172)\)\.
-* redfish\_utils module utils \- deprecate method RedfishUtils\.\_init\_session\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/9190](https\://github\.com/ansible\-collections/community\.general/pull/9190)\)\.
-
-
-### Bugfixes
-
-* dnf\_config\_manager \- fix hanging when prompting to import GPG keys \([https\://github\.com/ansible\-collections/community\.general/pull/9124](https\://github\.com/ansible\-collections/community\.general/pull/9124)\, [https\://github\.com/ansible\-collections/community\.general/issues/8830](https\://github\.com/ansible\-collections/community\.general/issues/8830)\)\.
-* dnf\_config\_manager \- forces locale to C
before module starts\. If the locale was set to non\-English\, the output of the dnf config\-manager
could not be parsed \([https\://github\.com/ansible\-collections/community\.general/pull/9157](https\://github\.com/ansible\-collections/community\.general/pull/9157)\, [https\://github\.com/ansible\-collections/community\.general/issues/9046](https\://github\.com/ansible\-collections/community\.general/issues/9046)\)\.
-* flatpak \- force the locale language to C
when running the flatpak command \([https\://github\.com/ansible\-collections/community\.general/pull/9187](https\://github\.com/ansible\-collections/community\.general/pull/9187)\, [https\://github\.com/ansible\-collections/community\.general/issues/8883](https\://github\.com/ansible\-collections/community\.general/issues/8883)\)\.
-* gio\_mime \- fix command line when determining version of gio
\([https\://github\.com/ansible\-collections/community\.general/pull/9171](https\://github\.com/ansible\-collections/community\.general/pull/9171)\, [https\://github\.com/ansible\-collections/community\.general/issues/9158](https\://github\.com/ansible\-collections/community\.general/issues/9158)\)\.
-* github\_key \- in check mode\, a faulty call to \`datetime\.strftime\(\.\.\.\)\`
was being made which generated an exception \([https\://github\.com/ansible\-collections/community\.general/issues/9185](https\://github\.com/ansible\-collections/community\.general/issues/9185)\)\.
-* homebrew\_cask \- allow \+
symbol in Homebrew cask name validation regex \([https\://github\.com/ansible\-collections/community\.general/pull/9128](https\://github\.com/ansible\-collections/community\.general/pull/9128)\)\.
-* keycloak\_clientscope\_type \- sort the default and optional clientscope lists to improve the diff \([https\://github\.com/ansible\-collections/community\.general/pull/9202](https\://github\.com/ansible\-collections/community\.general/pull/9202)\)\.
-* slack \- fail if Slack API response is not OK with error message \([https\://github\.com/ansible\-collections/community\.general/pull/9198](https\://github\.com/ansible\-collections/community\.general/pull/9198)\)\.
-
-
-### New Plugins
-
-
-#### Filter
-
-* community\.general\.accumulate \- Produce a list of accumulated sums of the input list contents\.
-
-
-### New Modules
-
-* community\.general\.decompress \- Decompresses compressed files\.
-* community\.general\.proxmox\_backup \- Start a VM backup in Proxmox VE cluster\.
-
-
-## v10\.0\.1
-
-
-### Release Summary
-
-Bugfix release for inclusion in Ansible 11\.0\.0rc1\.
-
-
-### Bugfixes
-
-* keycloak\_client \- fix diff by removing code that turns the attributes dict which contains additional settings into a list \([https\://github\.com/ansible\-collections/community\.general/pull/9077](https\://github\.com/ansible\-collections/community\.general/pull/9077)\)\.
-* keycloak\_clientscope \- fix diff and end\_state
by removing the code that turns the attributes dict\, which contains additional config items\, into a list \([https\://github\.com/ansible\-collections/community\.general/pull/9082](https\://github\.com/ansible\-collections/community\.general/pull/9082)\)\.
-* redfish\_utils module utils \- remove undocumented default applytime \([https\://github\.com/ansible\-collections/community\.general/pull/9114](https\://github\.com/ansible\-collections/community\.general/pull/9114)\)\.
-
-
-## v10\.0\.0
-
-
-### Release Summary
-
-This is release 10\.0\.0 of community\.general
\, released on 2024\-11\-04\.
-
-
-### Minor Changes
-
-* CmdRunner module util \- argument formats can be specified as plain functions without calling cmd\_runner\_fmt\.as\_func\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8479](https\://github\.com/ansible\-collections/community\.general/pull/8479)\)\.
-* CmdRunner module utils \- the parameter force\_lang
now supports the special value auto
which will automatically try and determine the best parsable locale in the system \([https\://github\.com/ansible\-collections/community\.general/pull/8517](https\://github\.com/ansible\-collections/community\.general/pull/8517)\)\.
-* MH module utils \- add parameter when
to cause\_changes
decorator \([https\://github\.com/ansible\-collections/community\.general/pull/8766](https\://github\.com/ansible\-collections/community\.general/pull/8766)\)\.
-* MH module utils \- minor refactor in decorators \([https\://github\.com/ansible\-collections/community\.general/pull/8766](https\://github\.com/ansible\-collections/community\.general/pull/8766)\)\.
-* alternatives \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* ansible\_galaxy\_install \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9060](https\://github\.com/ansible\-collections/community\.general/pull/9060)\)\.
-* ansible\_galaxy\_install \- add upgrade feature \([https\://github\.com/ansible\-collections/community\.general/pull/8431](https\://github\.com/ansible\-collections/community\.general/pull/8431)\, [https\://github\.com/ansible\-collections/community\.general/issues/8351](https\://github\.com/ansible\-collections/community\.general/issues/8351)\)\.
-* ansible\_galaxy\_install \- minor refactor in the module \([https\://github\.com/ansible\-collections/community\.general/pull/8413](https\://github\.com/ansible\-collections/community\.general/pull/8413)\)\.
-* apache2\_mod\_proxy \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* apache2\_mod\_proxy \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* cargo \- add option directory
\, which allows source directory to be specified \([https\://github\.com/ansible\-collections/community\.general/pull/8480](https\://github\.com/ansible\-collections/community\.general/pull/8480)\)\.
-* cgroup\_memory\_recap\, hipchat\, jabber\, log\_plays\, loganalytics\, logentries\, logstash\, slack\, splunk\, sumologic\, syslog\_json callback plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8628](https\://github\.com/ansible\-collections/community\.general/pull/8628)\)\.
-* chef\_databag\, consul\_kv\, cyberarkpassword\, dsv\, etcd\, filetree\, hiera\, onepassword\, onepassword\_doc\, onepassword\_raw\, passwordstore\, redis\, shelvefile\, tss lookup plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8626](https\://github\.com/ansible\-collections/community\.general/pull/8626)\)\.
-* chroot\, funcd\, incus\, iocage\, jail\, lxc\, lxd\, qubes\, zone connection plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8627](https\://github\.com/ansible\-collections/community\.general/pull/8627)\)\.
-* cmd\_runner module utils \- add decorator cmd\_runner\_fmt\.stack
\([https\://github\.com/ansible\-collections/community\.general/pull/8415](https\://github\.com/ansible\-collections/community\.general/pull/8415)\)\.
-* cmd\_runner module utils \- refactor argument formatting code to its own Python module \([https\://github\.com/ansible\-collections/community\.general/pull/8964](https\://github\.com/ansible\-collections/community\.general/pull/8964)\)\.
-* cmd\_runner\_fmt module utils \- simplify implementation of cmd\_runner\_fmt\.as\_bool\_not\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8512](https\://github\.com/ansible\-collections/community\.general/pull/8512)\)\.
-* cobbler\, linode\, lxd\, nmap\, online\, scaleway\, stackpath\_compute\, virtualbox inventory plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8625](https\://github\.com/ansible\-collections/community\.general/pull/8625)\)\.
-* consul\_acl \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* consul\_kv \- add argument for the datacenter option on Consul API \([https\://github\.com/ansible\-collections/community\.general/pull/9026](https\://github\.com/ansible\-collections/community\.general/pull/9026)\)\.
-* copr \- Added includepkgs
and excludepkgs
parameters to limit the list of packages fetched or excluded from the repository \([https\://github\.com/ansible\-collections/community\.general/pull/8779](https\://github\.com/ansible\-collections/community\.general/pull/8779)\)\.
-* cpanm \- add return value cpanm\_version
\([https\://github\.com/ansible\-collections/community\.general/pull/9061](https\://github\.com/ansible\-collections/community\.general/pull/9061)\)\.
-* credstash lookup plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* csv module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* deco MH module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* dig lookup plugin \- add port
option to specify DNS server port \([https\://github\.com/ansible\-collections/community\.general/pull/8966](https\://github\.com/ansible\-collections/community\.general/pull/8966)\)\.
-* django module utils \- always retrieve version \([https\://github\.com/ansible\-collections/community\.general/pull/9063](https\://github\.com/ansible\-collections/community\.general/pull/9063)\)\.
-* django\_check \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9063](https\://github\.com/ansible\-collections/community\.general/pull/9063)\)\.
-* django\_command \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9063](https\://github\.com/ansible\-collections/community\.general/pull/9063)\)\.
-* django\_createcachetable \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9063](https\://github\.com/ansible\-collections/community\.general/pull/9063)\)\.
-* doas\, dzdo\, ksu\, machinectl\, pbrun\, pfexec\, pmrun\, sesu\, sudosu become plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8623](https\://github\.com/ansible\-collections/community\.general/pull/8623)\)\.
-* etcd3 \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* flatpak \- improve the parsing of Flatpak application IDs based on official guidelines \([https\://github\.com/ansible\-collections/community\.general/pull/8909](https\://github\.com/ansible\-collections/community\.general/pull/8909)\)\.
-* gconftool2 \- make use of ModuleHelper
features to simplify code \([https\://github\.com/ansible\-collections/community\.general/pull/8711](https\://github\.com/ansible\-collections/community\.general/pull/8711)\)\.
-* gconftool2 \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9064](https\://github\.com/ansible\-collections/community\.general/pull/9064)\)\.
-* gconftool2 module utils \- add argument formatter version
\([https\://github\.com/ansible\-collections/community\.general/pull/9064](https\://github\.com/ansible\-collections/community\.general/pull/9064)\)\.
-* gconftool2\_info \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9064](https\://github\.com/ansible\-collections/community\.general/pull/9064)\)\.
-* gio\_mime \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9067](https\://github\.com/ansible\-collections/community\.general/pull/9067)\)\.
-* gio\_mime \- adjust code ahead of the old VarDict
deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8855](https\://github\.com/ansible\-collections/community\.general/pull/8855)\)\.
-* gio\_mime \- mute the old VarDict
deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8776](https\://github\.com/ansible\-collections/community\.general/pull/8776)\)\.
-* gio\_mime module utils \- add argument formatter version
\([https\://github\.com/ansible\-collections/community\.general/pull/9067](https\://github\.com/ansible\-collections/community\.general/pull/9067)\)\.
-* github\_app\_access\_token lookup plugin \- adds new private\_key
parameter \([https\://github\.com/ansible\-collections/community\.general/pull/8989](https\://github\.com/ansible\-collections/community\.general/pull/8989)\)\.
-* gitlab\_deploy\_key \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* gitlab\_group \- add many new parameters \([https\://github\.com/ansible\-collections/community\.general/pull/8908](https\://github\.com/ansible\-collections/community\.general/pull/8908)\)\.
-* gitlab\_group \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* gitlab\_group \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* gitlab\_issue \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* gitlab\_merge\_request \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* gitlab\_project \- add option container\_expiration\_policy
to schedule container registry cleanup \([https\://github\.com/ansible\-collections/community\.general/pull/8674](https\://github\.com/ansible\-collections/community\.general/pull/8674)\)\.
-* gitlab\_project \- add option issues\_access\_level
to enable/disable project issues \([https\://github\.com/ansible\-collections/community\.general/pull/8760](https\://github\.com/ansible\-collections/community\.general/pull/8760)\)\.
-* gitlab\_project \- add option model\_registry\_access\_level
to disable model registry \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\.
-* gitlab\_project \- add option pages\_access\_level
to disable project pages \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\.
-* gitlab\_project \- add option repository\_access\_level
to disable project repository \([https\://github\.com/ansible\-collections/community\.general/pull/8674](https\://github\.com/ansible\-collections/community\.general/pull/8674)\)\.
-* gitlab\_project \- add option service\_desk\_enabled
to disable service desk \([https\://github\.com/ansible\-collections/community\.general/pull/8688](https\://github\.com/ansible\-collections/community\.general/pull/8688)\)\.
-* gitlab\_project \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* gitlab\_project \- sorted parameters in order to avoid future merge conflicts \([https\://github\.com/ansible\-collections/community\.general/pull/8759](https\://github\.com/ansible\-collections/community\.general/pull/8759)\)\.
-* gitlab\_runner \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* hashids filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* homebrew \- speed up brew install and upgrade \([https\://github\.com/ansible\-collections/community\.general/pull/9022](https\://github\.com/ansible\-collections/community\.general/pull/9022)\)\.
-* hwc\_ecs\_instance \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* hwc\_evs\_disk \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* hwc\_vpc\_eip \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* hwc\_vpc\_peering\_connect \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* hwc\_vpc\_port \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* hwc\_vpc\_subnet \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* icinga2\_host \- replace loop with dict comprehension \([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* imc\_rest \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* ipa\_dnsrecord \- adds SSHFP
record type for managing SSH fingerprints in FreeIPA DNS \([https\://github\.com/ansible\-collections/community\.general/pull/8404](https\://github\.com/ansible\-collections/community\.general/pull/8404)\)\.
-* ipa\_otptoken \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* jenkins\_node \- add offline\_message
parameter for updating a Jenkins node offline cause reason when the state is \"disabled\" \(offline\) \([https\://github\.com/ansible\-collections/community\.general/pull/9084](https\://github\.com/ansible\-collections/community\.general/pull/9084)\)\.
-* jira \- adjust code ahead of the old VarDict
deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8856](https\://github\.com/ansible\-collections/community\.general/pull/8856)\)\.
-* jira \- mute the old VarDict
deprecation \([https\://github\.com/ansible\-collections/community\.general/pull/8776](https\://github\.com/ansible\-collections/community\.general/pull/8776)\)\.
-* jira \- replace deprecated params when using decorator cause\_changes
\([https\://github\.com/ansible\-collections/community\.general/pull/8791](https\://github\.com/ansible\-collections/community\.general/pull/8791)\)\.
-* keep\_keys filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* keycloak module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* keycloak\_client \- add client\-x509
choice to client\_authenticator\_type
\([https\://github\.com/ansible\-collections/community\.general/pull/8973](https\://github\.com/ansible\-collections/community\.general/pull/8973)\)\.
-* keycloak\_client \- assign auth flow by name \([https\://github\.com/ansible\-collections/community\.general/pull/8428](https\://github\.com/ansible\-collections/community\.general/pull/8428)\)\.
-* keycloak\_client \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* keycloak\_clientscope \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* keycloak\_identity\_provider \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* keycloak\_realm \- add boolean toggle to configure organization support for a given keycloak realm \([https\://github\.com/ansible\-collections/community\.general/issues/9027](https\://github\.com/ansible\-collections/community\.general/issues/9027)\, [https\://github\.com/ansible\-collections/community\.general/pull/8927/](https\://github\.com/ansible\-collections/community\.general/pull/8927/)\)\.
-* keycloak\_user\_federation \- add module argument allowing users to opt out of the removal of unspecified mappers\, for example to keep the keycloak default mappers \([https\://github\.com/ansible\-collections/community\.general/pull/8764](https\://github\.com/ansible\-collections/community\.general/pull/8764)\)\.
-* keycloak\_user\_federation \- add the user federation config parameter referral
to the module arguments \([https\://github\.com/ansible\-collections/community\.general/pull/8954](https\://github\.com/ansible\-collections/community\.general/pull/8954)\)\.
-* keycloak\_user\_federation \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* keycloak\_user\_federation \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* keycloak\_user\_federation \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* linode \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* locale\_gen \- add support for multiple locales \([https\://github\.com/ansible\-collections/community\.general/issues/8677](https\://github\.com/ansible\-collections/community\.general/issues/8677)\, [https\://github\.com/ansible\-collections/community\.general/pull/8682](https\://github\.com/ansible\-collections/community\.general/pull/8682)\)\.
-* lxc\_container \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* lxd\_container \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* manageiq\_provider \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* mattermost \- adds support for message priority \([https\://github\.com/ansible\-collections/community\.general/issues/9068](https\://github\.com/ansible\-collections/community\.general/issues/9068)\, [https\://github\.com/ansible\-collections/community\.general/pull/9087](https\://github\.com/ansible\-collections/community\.general/pull/9087)\)\.
-* memcached\, pickle\, redis\, yaml cache plugins \- make sure that all options are typed \([https\://github\.com/ansible\-collections/community\.general/pull/8624](https\://github\.com/ansible\-collections/community\.general/pull/8624)\)\.
-* memset\_dns\_reload \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* memset\_memstore\_info \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* memset\_server\_info \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* memset\_zone \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* memset\_zone\_domain \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* memset\_zone\_record \- replace loop with dict\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* nmcli \- add conn\_enable
param to reload connection \([https\://github\.com/ansible\-collections/community\.general/issues/3752](https\://github\.com/ansible\-collections/community\.general/issues/3752)\, [https\://github\.com/ansible\-collections/community\.general/issues/8704](https\://github\.com/ansible\-collections/community\.general/issues/8704)\, [https\://github\.com/ansible\-collections/community\.general/pull/8897](https\://github\.com/ansible\-collections/community\.general/pull/8897)\)\.
-* nmcli \- add state\=up
and state\=down
to enable/disable connections \([https\://github\.com/ansible\-collections/community\.general/issues/3752](https\://github\.com/ansible\-collections/community\.general/issues/3752)\, [https\://github\.com/ansible\-collections/community\.general/issues/8704](https\://github\.com/ansible\-collections/community\.general/issues/8704)\, [https\://github\.com/ansible\-collections/community\.general/issues/7152](https\://github\.com/ansible\-collections/community\.general/issues/7152)\, [https\://github\.com/ansible\-collections/community\.general/pull/8897](https\://github\.com/ansible\-collections/community\.general/pull/8897)\)\.
-* nmcli \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* npm \- add force
parameter to allow \-\-force
\([https\://github\.com/ansible\-collections/community\.general/pull/8885](https\://github\.com/ansible\-collections/community\.general/pull/8885)\)\.
-* ocapi\_utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* one\_image \- add create
\, template
and datastore\_id
arguments for image creation \([https\://github\.com/ansible\-collections/community\.general/pull/9075](https\://github\.com/ansible\-collections/community\.general/pull/9075)\)\.
-* one\_image \- add wait\_timeout
argument for adjustable timeouts \([https\://github\.com/ansible\-collections/community\.general/pull/9075](https\://github\.com/ansible\-collections/community\.general/pull/9075)\)\.
-* one\_image \- add option persistent
to manage image persistence \([https\://github\.com/ansible\-collections/community\.general/issues/3578](https\://github\.com/ansible\-collections/community\.general/issues/3578)\, [https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\.
-* one\_image \- extend xsd scheme to make it return a lot more info about image \([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\.
-* one\_image \- refactor code to make it more similar to one\_template
and one\_vnet
\([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\.
-* one\_image\_info \- extend xsd scheme to make it return a lot more info about image \([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\.
-* one\_image\_info \- refactor code to make it more similar to one\_template
and one\_vnet
\([https\://github\.com/ansible\-collections/community\.general/pull/8889](https\://github\.com/ansible\-collections/community\.general/pull/8889)\)\.
-* one\_service \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* one\_vm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* onepassword lookup plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* open\_iscsi \- allow login to a portal with multiple targets without specifying any of them \([https\://github\.com/ansible\-collections/community\.general/pull/8719](https\://github\.com/ansible\-collections/community\.general/pull/8719)\)\.
-* openbsd\_pkg \- adds diff support to show changes in installed package list\. This does not yet work for check mode \([https\://github\.com/ansible\-collections/community\.general/pull/8402](https\://github\.com/ansible\-collections/community\.general/pull/8402)\)\.
-* opennebula\.py \- add VM id
and VM host
to inventory host data \([https\://github\.com/ansible\-collections/community\.general/pull/8532](https\://github\.com/ansible\-collections/community\.general/pull/8532)\)\.
-* opentelemetry callback plugin \- fix default value for store\_spans\_in\_file
causing traces to be produced to a file named None
\([https\://github\.com/ansible\-collections/community\.general/issues/8566](https\://github\.com/ansible\-collections/community\.general/issues/8566)\, [https\://github\.com/ansible\-collections/community\.general/pull/8741](https\://github\.com/ansible\-collections/community\.general/pull/8741)\)\.
-* opkg \- add return value version
\([https\://github\.com/ansible\-collections/community\.general/pull/9086](https\://github\.com/ansible\-collections/community\.general/pull/9086)\)\.
-* passwordstore lookup plugin \- add subkey creation/update support \([https\://github\.com/ansible\-collections/community\.general/pull/8952](https\://github\.com/ansible\-collections/community\.general/pull/8952)\)\.
-* passwordstore lookup plugin \- add the current user to the lockfile file name to address issues on multi\-user systems \([https\://github\.com/ansible\-collections/community\.general/pull/8689](https\://github\.com/ansible\-collections/community\.general/pull/8689)\)\.
-* pids \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* pipx \- add parameter suffix
to module \([https\://github\.com/ansible\-collections/community\.general/pull/8675](https\://github\.com/ansible\-collections/community\.general/pull/8675)\, [https\://github\.com/ansible\-collections/community\.general/issues/8656](https\://github\.com/ansible\-collections/community\.general/issues/8656)\)\.
-* pipx \- added new states install\_all
\, uninject
\, upgrade\_shared
\, pin
\, and unpin
\([https\://github\.com/ansible\-collections/community\.general/pull/8809](https\://github\.com/ansible\-collections/community\.general/pull/8809)\)\.
-* pipx \- added parameter global
to module \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\.
-* pipx \- refactor out parsing of pipx list
output to module utils \([https\://github\.com/ansible\-collections/community\.general/pull/9044](https\://github\.com/ansible\-collections/community\.general/pull/9044)\)\.
-* pipx \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* pipx\_info \- add new return value pinned
\([https\://github\.com/ansible\-collections/community\.general/pull/9044](https\://github\.com/ansible\-collections/community\.general/pull/9044)\)\.
-* pipx\_info \- added parameter global
to module \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\.
-* pipx\_info \- refactor out parsing of pipx list
output to module utils \([https\://github\.com/ansible\-collections/community\.general/pull/9044](https\://github\.com/ansible\-collections/community\.general/pull/9044)\)\.
-* pipx\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* pkg5\_publisher \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* pkgng \- add option use\_globs
\(default true
\) to optionally disable glob patterns \([https\://github\.com/ansible\-collections/community\.general/issues/8632](https\://github\.com/ansible\-collections/community\.general/issues/8632)\, [https\://github\.com/ansible\-collections/community\.general/pull/8633](https\://github\.com/ansible\-collections/community\.general/pull/8633)\)\.
-* proxmox \- add disk\_volume
and mount\_volumes
keys for better readability \([https\://github\.com/ansible\-collections/community\.general/pull/8542](https\://github\.com/ansible\-collections/community\.general/pull/8542)\)\.
-* proxmox \- allow specification of the API port when using proxmox\_\* \([https\://github\.com/ansible\-collections/community\.general/issues/8440](https\://github\.com/ansible\-collections/community\.general/issues/8440)\, [https\://github\.com/ansible\-collections/community\.general/pull/8441](https\://github\.com/ansible\-collections/community\.general/pull/8441)\)\.
-* proxmox \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* proxmox \- translate the old disk
and mounts
keys to the new handling internally \([https\://github\.com/ansible\-collections/community\.general/pull/8542](https\://github\.com/ansible\-collections/community\.general/pull/8542)\)\.
-* proxmox inventory plugin \- add new fact for LXC interface details \([https\://github\.com/ansible\-collections/community\.general/pull/8713](https\://github\.com/ansible\-collections/community\.general/pull/8713)\)\.
-* proxmox inventory plugin \- clean up authentication code \([https\://github\.com/ansible\-collections/community\.general/pull/8917](https\://github\.com/ansible\-collections/community\.general/pull/8917)\)\.
-* proxmox inventory plugin \- fix urllib3 InsecureRequestWarnings
not being suppressed when a token is used \([https\://github\.com/ansible\-collections/community\.general/pull/9099](https\://github\.com/ansible\-collections/community\.general/pull/9099)\)\.
-* proxmox\_disk \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* proxmox\_kvm \- adds the ciupgrade
parameter to specify whether cloud\-init should upgrade system packages at first boot \([https\://github\.com/ansible\-collections/community\.general/pull/9066](https\://github\.com/ansible\-collections/community\.general/pull/9066)\)\.
-* proxmox\_kvm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* proxmox\_kvm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* proxmox\_template \- small refactor in logic for determining whether a template exists or not \([https\://github\.com/ansible\-collections/community\.general/pull/8516](https\://github\.com/ansible\-collections/community\.general/pull/8516)\)\.
-* proxmox\_vm\_info \- add network
option to retrieve current network information \([https\://github\.com/ansible\-collections/community\.general/pull/8471](https\://github\.com/ansible\-collections/community\.general/pull/8471)\)\.
-* redfish\_\* modules \- adds ciphers
option for custom cipher selection \([https\://github\.com/ansible\-collections/community\.general/pull/8533](https\://github\.com/ansible\-collections/community\.general/pull/8533)\)\.
-* redfish\_command \- add UpdateUserAccountTypes
command \([https\://github\.com/ansible\-collections/community\.general/issues/9058](https\://github\.com/ansible\-collections/community\.general/issues/9058)\, [https\://github\.com/ansible\-collections/community\.general/pull/9059](https\://github\.com/ansible\-collections/community\.general/pull/9059)\)\.
-* redfish\_command \- add wait
and wait\_timeout
options to allow a user to block a command until a service is accessible after performing the requested command \([https\://github\.com/ansible\-collections/community\.general/issues/8051](https\://github\.com/ansible\-collections/community\.general/issues/8051)\, [https\://github\.com/ansible\-collections/community\.general/pull/8434](https\://github\.com/ansible\-collections/community\.general/pull/8434)\)\.
-* redfish\_command \- add handling of the PasswordChangeRequired
message from services in the UpdateUserPassword
command to directly modify the user\'s password if the requested user is the one invoking the operation \([https\://github\.com/ansible\-collections/community\.general/issues/8652](https\://github\.com/ansible\-collections/community\.general/issues/8652)\, [https\://github\.com/ansible\-collections/community\.general/pull/8653](https\://github\.com/ansible\-collections/community\.general/pull/8653)\)\.
-* redfish\_config \- remove CapacityBytes
from required parameters of the CreateVolume
command \([https\://github\.com/ansible\-collections/community\.general/pull/8956](https\://github\.com/ansible\-collections/community\.general/pull/8956)\)\.
-* redfish\_config \- add parameter storage\_none\_volume\_deletion
to CreateVolume
command in order to control the automatic deletion of non\-RAID volumes \([https\://github\.com/ansible\-collections/community\.general/pull/8990](https\://github\.com/ansible\-collections/community\.general/pull/8990)\)\.
-* redfish\_info \- add command CheckAvailability
to check if a service is accessible \([https\://github\.com/ansible\-collections/community\.general/issues/8051](https\://github\.com/ansible\-collections/community\.general/issues/8051)\, [https\://github\.com/ansible\-collections/community\.general/pull/8434](https\://github\.com/ansible\-collections/community\.general/pull/8434)\)\.
-* redfish\_info \- adds RedfishURI
and StorageId
to Disk inventory \([https\://github\.com/ansible\-collections/community\.general/pull/8937](https\://github\.com/ansible\-collections/community\.general/pull/8937)\)\.
-* redfish\_utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* redfish\_utils module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* redfish\_utils module utils \- schedule a BIOS configuration job at next reboot when the BIOS config is changed \([https\://github\.com/ansible\-collections/community\.general/pull/9012](https\://github\.com/ansible\-collections/community\.general/pull/9012)\)\.
-* redis cache plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* redis\, redis\_info \- add client\_cert
and client\_key
options to specify path to certificate for Redis authentication \([https\://github\.com/ansible\-collections/community\.general/pull/8654](https\://github\.com/ansible\-collections/community\.general/pull/8654)\)\.
-* redis\_info \- adds support for getting cluster info \([https\://github\.com/ansible\-collections/community\.general/pull/8464](https\://github\.com/ansible\-collections/community\.general/pull/8464)\)\.
-* remove\_keys filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* replace\_keys filter plugin \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* scaleway \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* scaleway module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* scaleway\_compute \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* scaleway\_container \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_container\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_container\_namespace \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_container\_namespace\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_container\_registry \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_container\_registry\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_function \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_function\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_function\_namespace \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_function\_namespace\_info \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8858](https\://github\.com/ansible\-collections/community\.general/pull/8858)\)\.
-* scaleway\_ip \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* scaleway\_lb \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* scaleway\_security\_group \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* scaleway\_security\_group \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* scaleway\_user\_data \- better construct when using dict\.items\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* scaleway\_user\_data \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* sensu\_silence \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* snmp\_facts \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* sorcery \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8833](https\://github\.com/ansible\-collections/community\.general/pull/8833)\)\.
-* sudosu become plugin \- added an option \(alt\_method
\) to enhance compatibility with more versions of su
\([https\://github\.com/ansible\-collections/community\.general/pull/8214](https\://github\.com/ansible\-collections/community\.general/pull/8214)\)\.
-* udm\_dns\_record \- replace loop with dict\.update\(\)
\([https\://github\.com/ansible\-collections/community\.general/pull/8876](https\://github\.com/ansible\-collections/community\.general/pull/8876)\)\.
-* ufw \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* unsafe plugin utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* vardict module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* vars MH module utils \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8814](https\://github\.com/ansible\-collections/community\.general/pull/8814)\)\.
-* virtualbox inventory plugin \- expose a new parameter enable\_advanced\_group\_parsing
to change how the VirtualBox dynamic inventory parses VM groups \([https\://github\.com/ansible\-collections/community\.general/issues/8508](https\://github\.com/ansible\-collections/community\.general/issues/8508)\, [https\://github\.com/ansible\-collections/community\.general/pull/8510](https\://github\.com/ansible\-collections/community\.general/pull/8510)\)\.
-* vmadm \- replace Python 2\.6 construct with dict comprehensions \([https\://github\.com/ansible\-collections/community\.general/pull/8822](https\://github\.com/ansible\-collections/community\.general/pull/8822)\)\.
-* wdc\_redfish\_command \- minor change to handle upgrade file for Redfish WD platforms \([https\://github\.com/ansible\-collections/community\.general/pull/8444](https\://github\.com/ansible\-collections/community\.general/pull/8444)\)\.
-
-
-### Breaking Changes / Porting Guide
-
-* The collection no longer supports ansible\-core 2\.13 and ansible\-core 2\.14\. While most \(or even all\) modules and plugins might still work with these versions\, they are no longer tested in CI and breakages regarding them will not be fixed \([https\://github\.com/ansible\-collections/community\.general/pull/8921](https\://github\.com/ansible\-collections/community\.general/pull/8921)\)\.
-* cmd\_runner module utils \- CLI arguments created directly from module parameters are no longer assigned a default formatter \([https\://github\.com/ansible\-collections/community\.general/pull/8928](https\://github\.com/ansible\-collections/community\.general/pull/8928)\)\.
-* irc \- the defaults of use\_tls
and validate\_certs
changed from false
to true
\([https\://github\.com/ansible\-collections/community\.general/pull/8918](https\://github\.com/ansible\-collections/community\.general/pull/8918)\)\.
-* rhsm\_repository \- the states present
and absent
have been removed\. Use enabled
and disabled
instead \([https\://github\.com/ansible\-collections/community\.general/pull/8918](https\://github\.com/ansible\-collections/community\.general/pull/8918)\)\.
-
-
-### Deprecated Features
-
-* CmdRunner module util \- setting the value of the ignore\_none
parameter within a CmdRunner
context is deprecated and that feature should be removed in community\.general 12\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8479](https\://github\.com/ansible\-collections/community\.general/pull/8479)\)\.
-* MH decorator cause\_changes module utils \- deprecate parameters on\_success
and on\_failure
\([https\://github\.com/ansible\-collections/community\.general/pull/8791](https\://github\.com/ansible\-collections/community\.general/pull/8791)\)\.
-* git\_config \- the list\_all
option has been deprecated and will be removed in community\.general 11\.0\.0\. Use the community\.general\.git\_config\_info
module instead \([https\://github\.com/ansible\-collections/community\.general/pull/8453](https\://github\.com/ansible\-collections/community\.general/pull/8453)\)\.
-* git\_config \- using state\=present
without providing value
is deprecated and will be disallowed in community\.general 11\.0\.0\. Use the community\.general\.git\_config\_info
module instead to read a value \([https\://github\.com/ansible\-collections/community\.general/pull/8453](https\://github\.com/ansible\-collections/community\.general/pull/8453)\)\.
-* hipchat \- the hipchat service has been discontinued and the self\-hosted variant has been End of Life since 2020\. The module is therefore deprecated and will be removed from community\.general 11\.0\.0 if nobody provides compelling reasons to still keep it \([https\://github\.com/ansible\-collections/community\.general/pull/8919](https\://github\.com/ansible\-collections/community\.general/pull/8919)\)\.
-* pipx \- support for versions of the command line tool pipx
older than 1\.7\.0
is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\.
-* pipx\_info \- support for versions of the command line tool pipx
older than 1\.7\.0
is deprecated and will be removed in community\.general 11\.0\.0 \([https\://github\.com/ansible\-collections/community\.general/pull/8793](https\://github\.com/ansible\-collections/community\.general/pull/8793)\)\.
-
-
-### Removed Features \(previously deprecated\)
-
-* The consul\_acl module has been removed\. Use community\.general\.consul\_token and/or community\.general\.consul\_policy instead \([https\://github\.com/ansible\-collections/community\.general/pull/8921](https\://github\.com/ansible\-collections/community\.general/pull/8921)\)\.
-* The hipchat callback plugin has been removed\. The hipchat service has been discontinued and the self\-hosted variant has been End of Life since 2020 \([https\://github\.com/ansible\-collections/community\.general/pull/8921](https\://github\.com/ansible\-collections/community\.general/pull/8921)\)\.
-* The redhat module utils has been removed \([https\://github\.com/ansible\-collections/community\.general/pull/8921](https\://github\.com/ansible\-collections/community\.general/pull/8921)\)\.
-* The rhn\_channel module has been removed \([https\://github\.com/ansible\-collections/community\.general/pull/8921](https\://github\.com/ansible\-collections/community\.general/pull/8921)\)\.
-* The rhn\_register module has been removed \([https\://github\.com/ansible\-collections/community\.general/pull/8921](https\://github\.com/ansible\-collections/community\.general/pull/8921)\)\.
-* consul \- removed the ack\_params\_state\_absent
option\. It had no effect anymore \([https\://github\.com/ansible\-collections/community\.general/pull/8918](https\://github\.com/ansible\-collections/community\.general/pull/8918)\)\.
-* ejabberd\_user \- removed the logging
option \([https\://github\.com/ansible\-collections/community\.general/pull/8918](https\://github\.com/ansible\-collections/community\.general/pull/8918)\)\.
-* gitlab modules \- remove basic auth feature \([https\://github\.com/ansible\-collections/community\.general/pull/8405](https\://github\.com/ansible\-collections/community\.general/pull/8405)\)\.
-* proxmox\_kvm \- removed the proxmox\_default\_behavior
option\. Explicitly specify the old default values if you were using proxmox\_default\_behavior\=compatibility
\, otherwise simply remove it \([https\://github\.com/ansible\-collections/community\.general/pull/8918](https\://github\.com/ansible\-collections/community\.general/pull/8918)\)\.
-* redhat\_subscriptions \- removed the pool
option\. Use pool\_ids
instead \([https\://github\.com/ansible\-collections/community\.general/pull/8918](https\://github\.com/ansible\-collections/community\.general/pull/8918)\)\.
-
-
-### Bugfixes
-
-* bitwarden lookup plugin \- fix KeyError
in search\_field
\([https\://github\.com/ansible\-collections/community\.general/issues/8549](https\://github\.com/ansible\-collections/community\.general/issues/8549)\, [https\://github\.com/ansible\-collections/community\.general/pull/8557](https\://github\.com/ansible\-collections/community\.general/pull/8557)\)\.
-* bitwarden lookup plugin \- support BWS v0\.3\.0 syntax breaking change \([https\://github\.com/ansible\-collections/community\.general/pull/9028](https\://github\.com/ansible\-collections/community\.general/pull/9028)\)\.
-* cloudflare\_dns \- fix changing Cloudflare SRV records \([https\://github\.com/ansible\-collections/community\.general/issues/8679](https\://github\.com/ansible\-collections/community\.general/issues/8679)\, [https\://github\.com/ansible\-collections/community\.general/pull/8948](https\://github\.com/ansible\-collections/community\.general/pull/8948)\)\.
-* cmd\_runner module utils \- call to get\_best\_parsable\_locales\(\)
was missing parameter \([https\://github\.com/ansible\-collections/community\.general/pull/8929](https\://github\.com/ansible\-collections/community\.general/pull/8929)\)\.
-* collection\_version lookup plugin \- use importlib
directly instead of the deprecated and in ansible\-core 2\.19 removed ansible\.module\_utils\.compat\.importlib
\([https\://github\.com/ansible\-collections/community\.general/pull/9084](https\://github\.com/ansible\-collections/community\.general/pull/9084)\)\.
-* cpanm \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* dig lookup plugin \- fix using only the last nameserver specified \([https\://github\.com/ansible\-collections/community\.general/pull/8970](https\://github\.com/ansible\-collections/community\.general/pull/8970)\)\.
-* django module utils \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* django\_command \- option command
is now split lexically before passed to underlying PythonRunner \([https\://github\.com/ansible\-collections/community\.general/pull/8944](https\://github\.com/ansible\-collections/community\.general/pull/8944)\)\.
-* gconftool2\_info \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* git\_config \- fix behavior of state\=absent
if value
is present \([https\://github\.com/ansible\-collections/community\.general/issues/8436](https\://github\.com/ansible\-collections/community\.general/issues/8436)\, [https\://github\.com/ansible\-collections/community\.general/pull/8452](https\://github\.com/ansible\-collections/community\.general/pull/8452)\)\.
-* gitlab\_group\_access\_token \- fix crash in check mode caused by attempted access to a newly created access token \([https\://github\.com/ansible\-collections/community\.general/pull/8796](https\://github\.com/ansible\-collections/community\.general/pull/8796)\)\.
-* gitlab\_label \- update label\'s color \([https\://github\.com/ansible\-collections/community\.general/pull/9010](https\://github\.com/ansible\-collections/community\.general/pull/9010)\)\.
-* gitlab\_project \- fix container\_expiration\_policy
not being applied when creating a new project \([https\://github\.com/ansible\-collections/community\.general/pull/8790](https\://github\.com/ansible\-collections/community\.general/pull/8790)\)\.
-* gitlab\_project \- fix crash caused by old Gitlab projects not having a container\_expiration\_policy
attribute \([https\://github\.com/ansible\-collections/community\.general/pull/8790](https\://github\.com/ansible\-collections/community\.general/pull/8790)\)\.
-* gitlab\_project\_access\_token \- fix crash in check mode caused by attempted access to a newly created access token \([https\://github\.com/ansible\-collections/community\.general/pull/8796](https\://github\.com/ansible\-collections/community\.general/pull/8796)\)\.
-* gitlab\_runner \- fix paused
parameter being ignored \([https\://github\.com/ansible\-collections/community\.general/pull/8648](https\://github\.com/ansible\-collections/community\.general/pull/8648)\)\.
-* homebrew \- do not fail when brew prints warnings \([https\://github\.com/ansible\-collections/community\.general/pull/8406](https\://github\.com/ansible\-collections/community\.general/pull/8406)\, [https\://github\.com/ansible\-collections/community\.general/issues/7044](https\://github\.com/ansible\-collections/community\.general/issues/7044)\)\.
-* homebrew\_cask \- fix upgrade\_all
returns changed
when nothing upgraded \([https\://github\.com/ansible\-collections/community\.general/issues/8707](https\://github\.com/ansible\-collections/community\.general/issues/8707)\, [https\://github\.com/ansible\-collections/community\.general/pull/8708](https\://github\.com/ansible\-collections/community\.general/pull/8708)\)\.
-* homectl \- the module now tries to use legacycrypt
on Python 3\.13\+ \([https\://github\.com/ansible\-collections/community\.general/issues/4691](https\://github\.com/ansible\-collections/community\.general/issues/4691)\, [https\://github\.com/ansible\-collections/community\.general/pull/8987](https\://github\.com/ansible\-collections/community\.general/pull/8987)\)\.
-* hponcfg \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* ini\_file \- pass absolute paths to module\.atomic\_move\(\)
\([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
-* ipa\_host \- add force\_create
\, fix enabled
and disabled
states \([https\://github\.com/ansible\-collections/community\.general/issues/1094](https\://github\.com/ansible\-collections/community\.general/issues/1094)\, [https\://github\.com/ansible\-collections/community\.general/pull/8920](https\://github\.com/ansible\-collections/community\.general/pull/8920)\)\.
-* ipa\_hostgroup \- fix enabled \`\` and \`\`disabled
states \([https\://github\.com/ansible\-collections/community\.general/issues/8408](https\://github\.com/ansible\-collections/community\.general/issues/8408)\, [https\://github\.com/ansible\-collections/community\.general/pull/8900](https\://github\.com/ansible\-collections/community\.general/pull/8900)\)\.
-* java\_keystore \- pass absolute paths to module\.atomic\_move\(\)
\([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
-* jenkins\_node \- fixed enabled
\, disable
and absent
node state redirect authorization issues\, same as was present for present
\([https\://github\.com/ansible\-collections/community\.general/pull/9084](https\://github\.com/ansible\-collections/community\.general/pull/9084)\)\.
-* jenkins\_plugin \- pass absolute paths to module\.atomic\_move\(\)
\([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
-* kdeconfig \- pass absolute paths to module\.atomic\_move\(\)
\([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
-* kernel\_blacklist \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* keycloak\_client \- fix TypeError when sanitizing the saml\.signing\.private\.key
attribute in the module\'s diff or state output\. The sanitize\_cr
function expected a dict where in some cases a list might occur \([https\://github\.com/ansible\-collections/community\.general/pull/8403](https\://github\.com/ansible\-collections/community\.general/pull/8403)\)\.
-* keycloak\_clientscope \- remove IDs from clientscope and its protocol mappers on comparison for changed check \([https\://github\.com/ansible\-collections/community\.general/pull/8545](https\://github\.com/ansible\-collections/community\.general/pull/8545)\)\.
-* keycloak\_clientscope\_type \- fix detect changes in check mode \([https\://github\.com/ansible\-collections/community\.general/issues/9092](https\://github\.com/ansible\-collections/community\.general/issues/9092)\, [https\://github\.com/ansible\-collections/community\.general/pull/9093](https\://github\.com/ansible\-collections/community\.general/pull/9093)\)\.
-* keycloak\_group \- fix crash caused in subgroup creation\. The crash was caused by a missing or empty subGroups
property in Keycloak ≥23 \([https\://github\.com/ansible\-collections/community\.general/issues/8788](https\://github\.com/ansible\-collections/community\.general/issues/8788)\, [https\://github\.com/ansible\-collections/community\.general/pull/8979](https\://github\.com/ansible\-collections/community\.general/pull/8979)\)\.
-* keycloak\_realm \- add normalizations for attributes
and protocol\_mappers
\([https\://github\.com/ansible\-collections/community\.general/pull/8496](https\://github\.com/ansible\-collections/community\.general/pull/8496)\)\.
-* keycloak\_realm \- fix change detection in check mode by sorting the lists in the realms beforehand \([https\://github\.com/ansible\-collections/community\.general/pull/8877](https\://github\.com/ansible\-collections/community\.general/pull/8877)\)\.
-* keycloak\_realm\_key \- fix invalid usage of parent\_id
\([https\://github\.com/ansible\-collections/community\.general/issues/7850](https\://github\.com/ansible\-collections/community\.general/issues/7850)\, [https\://github\.com/ansible\-collections/community\.general/pull/8823](https\://github\.com/ansible\-collections/community\.general/pull/8823)\)\.
-* keycloak\_user\_federation \- add module argument allowing users to configure the update mode for the parameter bindCredential
\([https\://github\.com/ansible\-collections/community\.general/pull/8898](https\://github\.com/ansible\-collections/community\.general/pull/8898)\)\.
-* keycloak\_user\_federation \- fix key error when removing mappers during an update and new mappers are specified in the module args \([https\://github\.com/ansible\-collections/community\.general/pull/8762](https\://github\.com/ansible\-collections/community\.general/pull/8762)\)\.
-* keycloak\_user\_federation \- fix the UnboundLocalError
that occurs when an ID is provided for a user federation mapper \([https\://github\.com/ansible\-collections/community\.general/pull/8831](https\://github\.com/ansible\-collections/community\.general/pull/8831)\)\.
-* keycloak\_user\_federation \- get cleartext IDP clientSecret
from full realm info to detect changes to it \([https\://github\.com/ansible\-collections/community\.general/issues/8294](https\://github\.com/ansible\-collections/community\.general/issues/8294)\, [https\://github\.com/ansible\-collections/community\.general/pull/8735](https\://github\.com/ansible\-collections/community\.general/pull/8735)\)\.
-* keycloak\_user\_federation \- minimize change detection by setting krbPrincipalAttribute
to \'\'
in Keycloak responses if missing \([https\://github\.com/ansible\-collections/community\.general/pull/8785](https\://github\.com/ansible\-collections/community\.general/pull/8785)\)\.
-* keycloak\_user\_federation \- remove lastSync
parameter from Keycloak responses to minimize diff/changes \([https\://github\.com/ansible\-collections/community\.general/pull/8812](https\://github\.com/ansible\-collections/community\.general/pull/8812)\)\.
-* keycloak\_user\_federation \- remove existing user federation mappers if they are not present in the federation configuration and will not be updated \([https\://github\.com/ansible\-collections/community\.general/issues/7169](https\://github\.com/ansible\-collections/community\.general/issues/7169)\, [https\://github\.com/ansible\-collections/community\.general/pull/8695](https\://github\.com/ansible\-collections/community\.general/pull/8695)\)\.
-* keycloak\_user\_federation \- sort desired and after mapper list by name \(analog to before mapper list\) to minimize diff and make change detection more accurate \([https\://github\.com/ansible\-collections/community\.general/pull/8761](https\://github\.com/ansible\-collections/community\.general/pull/8761)\)\.
-* keycloak\_userprofile \- fix empty response when fetching userprofile component by removing parent\=parent\_id
filter \([https\://github\.com/ansible\-collections/community\.general/pull/8923](https\://github\.com/ansible\-collections/community\.general/pull/8923)\)\.
-* keycloak\_userprofile \- improve diff by deserializing the fetched kc\.user\.profile\.config
and serialize it only when sending back \([https\://github\.com/ansible\-collections/community\.general/pull/8940](https\://github\.com/ansible\-collections/community\.general/pull/8940)\)\.
-* launched \- correctly report changed status in check mode \([https\://github\.com/ansible\-collections/community\.general/pull/8406](https\://github\.com/ansible\-collections/community\.general/pull/8406)\)\.
-* locale\_gen \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* lxd\_container \- fix bug introduced in previous commit \([https\://github\.com/ansible\-collections/community\.general/pull/8895](https\://github\.com/ansible\-collections/community\.general/pull/8895)\, [https\://github\.com/ansible\-collections/community\.general/issues/8888](https\://github\.com/ansible\-collections/community\.general/issues/8888)\)\.
-* mksysb \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* modprobe \- fix check mode not being honored for persistent
option \([https\://github\.com/ansible\-collections/community\.general/issues/9051](https\://github\.com/ansible\-collections/community\.general/issues/9051)\, [https\://github\.com/ansible\-collections/community\.general/pull/9052](https\://github\.com/ansible\-collections/community\.general/pull/9052)\)\.
-* nsupdate \- fix \'index out of range\' error when changing NS records by falling back to authority section of the response \([https\://github\.com/ansible\-collections/community\.general/issues/8612](https\://github\.com/ansible\-collections/community\.general/issues/8612)\, [https\://github\.com/ansible\-collections/community\.general/pull/8614](https\://github\.com/ansible\-collections/community\.general/pull/8614)\)\.
-* one\_host \- fix if statements for cases when ID\=0
\([https\://github\.com/ansible\-collections/community\.general/issues/1199](https\://github\.com/ansible\-collections/community\.general/issues/1199)\, [https\://github\.com/ansible\-collections/community\.general/pull/8907](https\://github\.com/ansible\-collections/community\.general/pull/8907)\)\.
-* one\_image \- fix module failing due to a class method typo \([https\://github\.com/ansible\-collections/community\.general/pull/9056](https\://github\.com/ansible\-collections/community\.general/pull/9056)\)\.
-* one\_image\_info \- fix module failing due to a class method typo \([https\://github\.com/ansible\-collections/community\.general/pull/9056](https\://github\.com/ansible\-collections/community\.general/pull/9056)\)\.
-* one\_service \- fix service creation after it was deleted with unique
parameter \([https\://github\.com/ansible\-collections/community\.general/issues/3137](https\://github\.com/ansible\-collections/community\.general/issues/3137)\, [https\://github\.com/ansible\-collections/community\.general/pull/8887](https\://github\.com/ansible\-collections/community\.general/pull/8887)\)\.
-* one\_vnet \- fix module failing due to a variable typo \([https\://github\.com/ansible\-collections/community\.general/pull/9019](https\://github\.com/ansible\-collections/community\.general/pull/9019)\)\.
-* opennebula inventory plugin \- fix invalid reference to IP when inventory runs against NICs with no IPv4 address \([https\://github\.com/ansible\-collections/community\.general/pull/8489](https\://github\.com/ansible\-collections/community\.general/pull/8489)\)\.
-* opentelemetry callback \- do not save the JSON response when using the ansible\.builtin\.uri
module \([https\://github\.com/ansible\-collections/community\.general/pull/8430](https\://github\.com/ansible\-collections/community\.general/pull/8430)\)\.
-* opentelemetry callback \- do not save the content response when using the ansible\.builtin\.slurp
module \([https\://github\.com/ansible\-collections/community\.general/pull/8430](https\://github\.com/ansible\-collections/community\.general/pull/8430)\)\.
-* pam\_limits \- pass absolute paths to module\.atomic\_move\(\)
\([https\://github\.com/ansible/ansible/issues/83950](https\://github\.com/ansible/ansible/issues/83950)\, [https\://github\.com/ansible\-collections/community\.general/pull/8925](https\://github\.com/ansible\-collections/community\.general/pull/8925)\)\.
-* paman \- do not fail if an empty list of packages has been provided and there is nothing to do \([https\://github\.com/ansible\-collections/community\.general/pull/8514](https\://github\.com/ansible\-collections/community\.general/pull/8514)\)\.
-* pipx \- it was ignoring global
when listing existing applications \([https\://github\.com/ansible\-collections/community\.general/pull/9044](https\://github\.com/ansible\-collections/community\.general/pull/9044)\)\.
-* pipx module utils \- add missing command line formatter for argument spec\_metadata
\([https\://github\.com/ansible\-collections/community\.general/pull/9044](https\://github\.com/ansible\-collections/community\.general/pull/9044)\)\.
-* pipx\_info \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* proxmox \- fix idempotency on creation of mount volumes using Proxmox\' special \\:\
syntax \([https\://github\.com/ansible\-collections/community\.general/issues/8407](https\://github\.com/ansible\-collections/community\.general/issues/8407)\, [https\://github\.com/ansible\-collections/community\.general/pull/8542](https\://github\.com/ansible\-collections/community\.general/pull/8542)\)\.
-* proxmox \- fixed an issue where the new volume handling incorrectly converted null
values into \"None\"
strings \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\.
-* proxmox \- fixed an issue where volume strings where overwritten instead of appended to in the new build\_volume\(\)
method \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\.
-* proxmox \- removed the forced conversion of non\-string values to strings to be consistent with the module documentation \([https\://github\.com/ansible\-collections/community\.general/pull/8646](https\://github\.com/ansible\-collections/community\.general/pull/8646)\)\.
-* proxmox inventory plugin \- fixed a possible error on concatenating responses from proxmox\. In case an API call unexpectedly returned an empty result\, the inventory failed with a fatal error\. Added check for empty response \([https\://github\.com/ansible\-collections/community\.general/issues/8798](https\://github\.com/ansible\-collections/community\.general/issues/8798)\, [https\://github\.com/ansible\-collections/community\.general/pull/8794](https\://github\.com/ansible\-collections/community\.general/pull/8794)\)\.
-* python\_runner module utils \- parameter path\_prefix
was being handled as string when it should be a list \([https\://github\.com/ansible\-collections/community\.general/pull/8944](https\://github\.com/ansible\-collections/community\.general/pull/8944)\)\.
-* redfish\_utils module utils \- do not fail when language is not exactly \"en\" \([https\://github\.com/ansible\-collections/community\.general/pull/8613](https\://github\.com/ansible\-collections/community\.general/pull/8613)\)\.
-* redfish\_utils module utils \- fix issue with URI parsing to gracefully handling trailing slashes when extracting member identifiers \([https\://github\.com/ansible\-collections/community\.general/issues/9047](https\://github\.com/ansible\-collections/community\.general/issues/9047)\, [https\://github\.com/ansible\-collections/community\.general/pull/9057](https\://github\.com/ansible\-collections/community\.general/pull/9057)\)\.
-* snap \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* snap\_alias \- use new VarDict
to prevent deprecation warning \([https\://github\.com/ansible\-collections/community\.general/issues/8410](https\://github\.com/ansible\-collections/community\.general/issues/8410)\, [https\://github\.com/ansible\-collections/community\.general/pull/8411](https\://github\.com/ansible\-collections/community\.general/pull/8411)\)\.
-* udm\_user \- the module now tries to use legacycrypt
on Python 3\.13\+ \([https\://github\.com/ansible\-collections/community\.general/issues/4690](https\://github\.com/ansible\-collections/community\.general/issues/4690)\, [https\://github\.com/ansible\-collections/community\.general/pull/8987](https\://github\.com/ansible\-collections/community\.general/pull/8987)\)\.
-
-
-### Known Issues
-
-* jenkins\_node \- the module is not able to update offline message when node is already offline due to internally using toggleOffline API \([https\://github\.com/ansible\-collections/community\.general/pull/9084](https\://github\.com/ansible\-collections/community\.general/pull/9084)\)\.
-
-
-### New Plugins
-
-
-#### Filter
-
-* community\.general\.keep\_keys \- Keep specific keys from dictionaries in a list\.
-* community\.general\.remove\_keys \- Remove specific keys from dictionaries in a list\.
-* community\.general\.replace\_keys \- Replace specific keys in a list of dictionaries\.
-* community\.general\.reveal\_ansible\_type \- Return input type\.
-
-
-#### Test
-
-* community\.general\.ansible\_type \- Validate input type\.
-
-
-### New Modules
-
-* community\.general\.bootc\_manage \- Bootc Switch and Upgrade\.
-* community\.general\.consul\_agent\_check \- Add\, modify\, and delete checks within a consul cluster\.
-* community\.general\.consul\_agent\_service \- Add\, modify and delete services within a consul cluster\.
-* community\.general\.django\_check \- Wrapper for C\(django\-admin check\)\.
-* community\.general\.django\_createcachetable \- Wrapper for C\(django\-admin createcachetable\)\.
-* community\.general\.homebrew\_services \- Services manager for Homebrew\.
-* community\.general\.ipa\_getkeytab \- Manage keytab file in FreeIPA\.
-* community\.general\.jenkins\_node \- Manage Jenkins nodes\.
-* community\.general\.keycloak\_component \- Allows administration of Keycloak components via Keycloak API\.
-* community\.general\.keycloak\_realm\_keys\_metadata\_info \- Allows obtaining Keycloak realm keys metadata via Keycloak API\.
-* community\.general\.keycloak\_userprofile \- Allows managing Keycloak User Profiles\.
-* community\.general\.krb\_ticket \- Kerberos utils for managing tickets\.
-* community\.general\.one\_vnet \- Manages OpenNebula virtual networks\.
-* community\.general\.zypper\_repository\_info \- List Zypper repositories\.
+This file is a placeholder; a version-specific `CHANGELOG-vX.md` will be generated during releases from fragments
+under `changelogs/fragments`. On release branches once a release has been created, consult the branch's version-specific
+file for changes that have occurred in that branch.
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 4e38d14eb1..119e04e170 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,1014 +1,6 @@
-===============================
-Community General Release Notes
-===============================
+Placeholder changelog
+=====================
-.. contents:: Topics
-
-This changelog describes changes after version 9.0.0.
-
-v10.6.0
-=======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- apache2_module - added workaround for new PHP module name, from ``php7_module`` to ``php_module`` (https://github.com/ansible-collections/community.general/pull/9951).
-- gitlab_project - add option ``build_timeout`` (https://github.com/ansible-collections/community.general/pull/9960).
-- gitlab_project_members - extend choices parameter ``access_level`` by missing upstream valid value ``owner`` (https://github.com/ansible-collections/community.general/pull/9953).
-- hpilo_boot - add option to get an idempotent behavior while powering on server, resulting in success instead of failure when using ``state: boot_once`` option (https://github.com/ansible-collections/community.general/pull/9646).
-- idrac_redfish_command, idrac_redfish_config, idrac_redfish_info - add ``validate_certs``, ``ca_path``, and ``ciphers`` options to configure TLS/SSL (https://github.com/ansible-collections/community.general/issues/3686, https://github.com/ansible-collections/community.general/pull/9964).
-- ilo_redfish_command, ilo_redfish_config, ilo_redfish_info - add ``validate_certs``, ``ca_path``, and ``ciphers`` options to configure TLS/SSL (https://github.com/ansible-collections/community.general/issues/3686, https://github.com/ansible-collections/community.general/pull/9964).
-- keycloak module_utils - user groups can now be referenced by their name, like ``staff``, or their path, like ``/staff/engineering``. The path syntax allows users to reference subgroups, which is not possible otherwise (https://github.com/ansible-collections/community.general/pull/9898).
-- keycloak_user module - user groups can now be referenced by their name, like ``staff``, or their path, like ``/staff/engineering``. The path syntax allows users to reference subgroups, which is not possible otherwise (https://github.com/ansible-collections/community.general/pull/9898).
-- nmcli - add support for Infiniband MAC setting when ``type`` is ``infiniband`` (https://github.com/ansible-collections/community.general/pull/9962).
-- one_vm - update allowed values for ``updateconf`` to include new parameters as per the latest OpenNebula API documentation.
- Added parameters:
-
- * ``OS``: ``FIRMWARE``;
- * ``CPU_MODEL``: ``MODEL``, ``FEATURES``;
- * ``FEATURES``: ``VIRTIO_BLK_QUEUES``, ``VIRTIO_SCSI_QUEUES``, ``IOTHREADS``;
- * ``GRAPHICS``: ``PORT``, ``COMMAND``;
- * ``VIDEO``: ``ATS``, ``IOMMU``, ``RESOLUTION``, ``TYPE``, ``VRAM``;
- * ``RAW``: ``VALIDATE``;
- * ``BACKUP_CONFIG``: ``FS_FREEZE``, ``KEEP_LAST``, ``BACKUP_VOLATILE``, ``MODE``, ``INCREMENT_MODE``.
-
- (https://github.com/ansible-collections/community.general/pull/9959).
-- proxmox and proxmox_kvm modules - allow uppercase characters in VM/container tags (https://github.com/ansible-collections/community.general/issues/9895, https://github.com/ansible-collections/community.general/pull/10024).
-- puppet - improve parameter formatting, no impact to user (https://github.com/ansible-collections/community.general/pull/10014).
-- redfish module utils - add ``REDFISH_COMMON_ARGUMENT_SPEC``, a corresponding ``redfish`` docs fragment, and support for its ``validate_certs``, ``ca_path``, and ``ciphers`` options (https://github.com/ansible-collections/community.general/issues/3686, https://github.com/ansible-collections/community.general/pull/9964).
-- redfish_command, redfish_config, redfish_info - add ``validate_certs`` and ``ca_path`` options to configure TLS/SSL (https://github.com/ansible-collections/community.general/issues/3686, https://github.com/ansible-collections/community.general/pull/9964).
-- rocketchat - fix duplicate JSON conversion for Rocket.Chat < 7.4.0 (https://github.com/ansible-collections/community.general/pull/9965).
-- wdc_redfish_command, wdc_redfish_info - add ``validate_certs``, ``ca_path``, and ``ciphers`` options to configure TLS/SSL (https://github.com/ansible-collections/community.general/issues/3686, https://github.com/ansible-collections/community.general/pull/9964).
-- xcc_redfish_command - add ``validate_certs``, ``ca_path``, and ``ciphers`` options to configure TLS/SSL (https://github.com/ansible-collections/community.general/issues/3686, https://github.com/ansible-collections/community.general/pull/9964).
-- zypper - adds ``skip_post_errors`` that allows to skip RPM post-install errors (Zypper return code 107) (https://github.com/ansible-collections/community.general/issues/9972).
-
-Deprecated Features
--------------------
-
-- manifold lookup plugin - plugin is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/10028).
-- stackpath_compute inventory plugin - plugin is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/10026).
-
-Bugfixes
---------
-
-- dependent look plugin - make compatible with ansible-core's Data Tagging feature (https://github.com/ansible-collections/community.general/pull/9833).
-- diy callback plugin - make compatible with ansible-core's Data Tagging feature (https://github.com/ansible-collections/community.general/pull/9833).
-- github_deploy_key - check that key really exists on 422 to avoid masking other errors (https://github.com/ansible-collections/community.general/issues/6718, https://github.com/ansible-collections/community.general/pull/10011).
-- hashids and unicode_normalize filter plugins - avoid deprecated ``AnsibleFilterTypeError`` on ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/9992).
-- homebrew - emit a useful error message if ``brew info`` reports a package tap is ``null`` (https://github.com/ansible-collections/community.general/pull/10013, https://github.com/ansible-collections/community.general/issues/10012).
-- java_cert - the module no longer fails if the optional parameters ``pkcs12_alias`` and ``cert_alias`` are not provided (https://github.com/ansible-collections/community.general/pull/9970).
-- keycloak_authentication - fix authentification config duplication for Keycloak < 26.2.0 (https://github.com/ansible-collections/community.general/pull/9987).
-- keycloak_client - fix the idempotency regression by normalizing the Keycloak response for ``after_client`` (https://github.com/ansible-collections/community.general/issues/9905, https://github.com/ansible-collections/community.general/pull/9976).
-- proxmox inventory plugin - fix ``ansible_host`` staying empty for certain Proxmox nodes (https://github.com/ansible-collections/community.general/issues/5906, https://github.com/ansible-collections/community.general/pull/9952).
-- proxmox_disk - fail gracefully if ``storage`` is required but not provided by the user (https://github.com/ansible-collections/community.general/issues/9941, https://github.com/ansible-collections/community.general/pull/9963).
-- reveal_ansible_type filter plugin and ansible_type test plugin - make compatible with ansible-core's Data Tagging feature (https://github.com/ansible-collections/community.general/pull/9833).
-- sysrc - no longer always reporting ``changed=true`` when ``state=absent``. This fixes the method ``exists()`` (https://github.com/ansible-collections/community.general/issues/10004, https://github.com/ansible-collections/community.general/pull/10005).
-- yaml callback plugin - use ansible-core internals to avoid breakage with Data Tagging (https://github.com/ansible-collections/community.general/pull/9833).
-
-Known Issues
-------------
-
-- reveal_ansible_type filter plugin and ansible_type test plugin - note that ansible-core's Data Tagging feature implements new aliases, such as ``_AnsibleTaggedStr`` for ``str``, ``_AnsibleTaggedInt`` for ``int``, and ``_AnsibleTaggedFloat`` for ``float`` (https://github.com/ansible-collections/community.general/pull/9833).
-
-New Plugins
------------
-
-Connection
-~~~~~~~~~~
-
-- community.general.wsl - Run tasks in WSL distribution using wsl.exe CLI via SSH.
-
-v10.5.0
-=======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- CmdRunner module utils - the convenience method ``cmd_runner_fmt.as_fixed()`` now accepts multiple arguments as a list (https://github.com/ansible-collections/community.general/pull/9893).
-- apache2_mod_proxy - code simplification, no change in functionality (https://github.com/ansible-collections/community.general/pull/9457).
-- consul_token - fix idempotency when ``policies`` or ``roles`` are supplied by name (https://github.com/ansible-collections/community.general/issues/9841, https://github.com/ansible-collections/community.general/pull/9845).
-- keycloak_realm - remove ID requirement when creating a realm to allow Keycloak generating its own realm ID (https://github.com/ansible-collections/community.general/pull/9768).
-- nmap inventory plugin - adds ``dns_servers`` option for specifying DNS servers for name resolution. Accepts hostnames or IP addresses in the same format as the ``exclude`` option (https://github.com/ansible-collections/community.general/pull/9849).
-- proxmox_kvm - add missing audio hardware device handling (https://github.com/ansible-collections/community.general/issues/5192, https://github.com/ansible-collections/community.general/pull/9847).
-- redfish_config - add command ``SetPowerRestorePolicy`` to set the desired power state of the system when power is restored (https://github.com/ansible-collections/community.general/pull/9837).
-- redfish_info - add command ``GetPowerRestorePolicy`` to get the desired power state of the system when power is restored (https://github.com/ansible-collections/community.general/pull/9824).
-- rocketchat - option ``is_pre740`` has been added to control the format of the payload. For Rocket.Chat 7.4.0 or newer, it must be set to ``false`` (https://github.com/ansible-collections/community.general/pull/9882).
-- slack callback plugin - add ``http_agent`` option to enable the user to set a custom user agent for slack callback plugin (https://github.com/ansible-collections/community.general/issues/9813, https://github.com/ansible-collections/community.general/pull/9836).
-- systemd_info - add wildcard expression support in ``unitname`` option (https://github.com/ansible-collections/community.general/pull/9821).
-- systemd_info - extend support to timer units (https://github.com/ansible-collections/community.general/pull/9891).
-- vmadm - add new options ``flexible_disk_size`` and ``owner_uuid`` (https://github.com/ansible-collections/community.general/pull/9892).
-
-Bugfixes
---------
-
-- cloudlare_dns - handle exhausted response stream in case of HTTP errors to show nice error message to the user (https://github.com/ansible-collections/community.general/issues/9782, https://github.com/ansible-collections/community.general/pull/9818).
-- dnf_versionlock - add support for dnf5 (https://github.com/ansible-collections/community.general/issues/9556).
-- homebrew - fix crash when package names include tap (https://github.com/ansible-collections/community.general/issues/9777, https://github.com/ansible-collections/community.general/pull/9803).
-- homebrew_cask - handle unusual brew version strings (https://github.com/ansible-collections/community.general/issues/8432, https://github.com/ansible-collections/community.general/pull/9881).
-- nmcli - enable changing only the order of DNS servers or search suffixes (https://github.com/ansible-collections/community.general/issues/8724, https://github.com/ansible-collections/community.general/pull/9880).
-- proxmox - add missing key selection of ``'status'`` key to ``get_lxc_status`` (https://github.com/ansible-collections/community.general/issues/9696, https://github.com/ansible-collections/community.general/pull/9809).
-- proxmox_vm_info - the module no longer expects that the key ``template`` exists in a dictionary returned by Proxmox (https://github.com/ansible-collections/community.general/issues/9875, https://github.com/ansible-collections/community.general/pull/9910).
-- sudoers - display stdout and stderr raised while failed validation (https://github.com/ansible-collections/community.general/issues/9674, https://github.com/ansible-collections/community.general/pull/9871).
-
-New Modules
------------
-
-- community.general.pacemaker_resource - Manage pacemaker resources.
-
-v10.4.0
-=======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- bitwarden lookup plugin - add new option ``collection_name`` to filter results by collection name, and new option ``result_count`` to validate number of results (https://github.com/ansible-collections/community.general/pull/9728).
-- incus connection plugin - adds ``remote_user`` and ``incus_become_method`` parameters for allowing a non-root user to connect to an Incus instance (https://github.com/ansible-collections/community.general/pull/9743).
-- iocage inventory plugin - the new parameter ``hooks_results`` of the plugin is a list of files inside a jail that provide configuration parameters for the inventory. The inventory plugin reads the files from the jails and put the contents into the items of created variable ``iocage_hooks`` (https://github.com/ansible-collections/community.general/issues/9650, https://github.com/ansible-collections/community.general/pull/9651).
-- jira - adds ``client_cert`` and ``client_key`` parameters for supporting client certificate authentification when connecting to Jira (https://github.com/ansible-collections/community.general/pull/9753).
-- lldp - adds ``multivalues`` parameter to control behavior when lldpctl outputs an attribute multiple times (https://github.com/ansible-collections/community.general/pull/9657).
-- lvg - add ``remove_extra_pvs`` parameter to control if ansible should remove physical volumes which are not in the ``pvs`` parameter (https://github.com/ansible-collections/community.general/pull/9698).
-- lxd connection plugin - adds ``remote_user`` and ``lxd_become_method`` parameters for allowing a non-root user to connect to an LXD instance (https://github.com/ansible-collections/community.general/pull/9659).
-- nmcli - adds VRF support with new ``type`` value ``vrf`` and new ``slave_type`` value ``vrf`` as well as new ``table`` parameter (https://github.com/ansible-collections/community.general/pull/9658, https://github.com/ansible-collections/community.general/issues/8014).
-- proxmox_kvm - allow hibernation and suspending of VMs (https://github.com/ansible-collections/community.general/issues/9620, https://github.com/ansible-collections/community.general/pull/9653).
-- redfish_command - add ``PowerFullPowerCycle`` to power command options (https://github.com/ansible-collections/community.general/pull/9729).
-- ssh_config - add ``other_options`` option (https://github.com/ansible-collections/community.general/issues/8053, https://github.com/ansible-collections/community.general/pull/9684).
-- xen_orchestra inventory plugin - add ``use_vm_uuid`` and ``use_host_uuid`` boolean options to allow switching over to using VM/Xen name labels instead of UUIDs as item names (https://github.com/ansible-collections/community.general/pull/9787).
-
-Deprecated Features
--------------------
-
-- profitbricks - module is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/9733).
-- profitbricks_datacenter - module is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/9733).
-- profitbricks_nic - module is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/9733).
-- profitbricks_volume - module is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/9733).
-- profitbricks_volume_attachments - module is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/9733).
-
-Bugfixes
---------
-
-- apache2_mod_proxy - make compatible with Python 3 (https://github.com/ansible-collections/community.general/pull/9762).
-- apache2_mod_proxy - passing the cluster's page as referer for the member's pages. This makes the module actually work again for halfway modern Apache versions. According to some comments founds on the net the referer was required since at least 2019 for some versions of Apache 2 (https://github.com/ansible-collections/community.general/pull/9762).
-- elasticsearch_plugin - fix ``ERROR: D is not a recognized option`` issue when configuring proxy settings (https://github.com/ansible-collections/community.general/pull/9774, https://github.com/ansible-collections/community.general/issues/9773).
-- ipa_host - module revoked existing host certificates even if ``user_certificate`` was not given (https://github.com/ansible-collections/community.general/pull/9694).
-- keycloak_client - in check mode, detect whether the lists in before client (for example redirect URI list) contain items that the lists in the desired client do not contain (https://github.com/ansible-collections/community.general/pull/9739).
-- lldp - fix crash caused by certain lldpctl output where an attribute is defined as branch and leaf (https://github.com/ansible-collections/community.general/pull/9657).
-- onepassword_doc lookup plugin - ensure that 1Password Connect support also works for this plugin (https://github.com/ansible-collections/community.general/pull/9625).
-- passwordstore lookup plugin - fix subkey creation even when ``create=false`` (https://github.com/ansible-collections/community.general/issues/9105, https://github.com/ansible-collections/community.general/pull/9106).
-- proxmox inventory plugin - plugin did not update cache correctly after ``meta: refresh_inventory`` (https://github.com/ansible-collections/community.general/issues/9710, https://github.com/ansible-collections/community.general/pull/9760).
-- redhat_subscription - use the "enable_content" option (when available) when
- registering using D-Bus, to ensure that subscription-manager enables the
- content on registration; this is particular important on EL 10+ and Fedora
- 41+
- (https://github.com/ansible-collections/community.general/pull/9778).
-- zfs - fix handling of multi-line values of user-defined ZFS properties (https://github.com/ansible-collections/community.general/pull/6264).
-- zfs_facts - parameter ``type`` now accepts multple values as documented (https://github.com/ansible-collections/community.general/issues/5909, https://github.com/ansible-collections/community.general/pull/9697).
-
-New Modules
------------
-
-- community.general.systemd_info - Gather C(systemd) unit info.
-
-v10.3.1
-=======
-
-Release Summary
----------------
-
-Bugfix release.
-
-Minor Changes
--------------
-
-- onepassword_ssh_key - refactor to move code to lookup class (https://github.com/ansible-collections/community.general/pull/9633).
-
-Bugfixes
---------
-
-- cloudflare_dns - fix crash when deleting a DNS record or when updating a record with ``solo=true`` (https://github.com/ansible-collections/community.general/issues/9652, https://github.com/ansible-collections/community.general/pull/9649).
-- homebrew - make package name parsing more resilient (https://github.com/ansible-collections/community.general/pull/9665, https://github.com/ansible-collections/community.general/issues/9641).
-- keycloak module utils - replaces missing return in get_role_composites method which caused it to return None instead of composite roles (https://github.com/ansible-collections/community.general/issues/9678, https://github.com/ansible-collections/community.general/pull/9691).
-- keycloak_client - fix and improve existing tests. The module showed a diff without actual changes, solved by improving the ``normalise_cr()`` function (https://github.com/ansible-collections/community.general/pull/9644).
-- proxmox - adds the ``pubkey`` parameter (back to) the ``update`` state (https://github.com/ansible-collections/community.general/issues/9642, https://github.com/ansible-collections/community.general/pull/9645).
-- proxmox - fixes a typo in the translation of the ``pubkey`` parameter to proxmox' ``ssh-public-keys`` (https://github.com/ansible-collections/community.general/issues/9642, https://github.com/ansible-collections/community.general/pull/9645).
-- xml - ensure file descriptor is closed (https://github.com/ansible-collections/community.general/pull/9695).
-
-v10.3.0
-=======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- MH module utils - delegate ``debug`` to the underlying ``AnsibleModule`` instance or issues a warning if an attribute already exists with that name (https://github.com/ansible-collections/community.general/pull/9577).
-- apache2_mod_proxy - better handling regexp extraction (https://github.com/ansible-collections/community.general/pull/9609).
-- apache2_mod_proxy - change type of ``state`` to a list of strings. No change for the users (https://github.com/ansible-collections/community.general/pull/9600).
-- apache2_mod_proxy - improve readability when using results from ``fecth_url()`` (https://github.com/ansible-collections/community.general/pull/9608).
-- apache2_mod_proxy - refactor repeated code into method (https://github.com/ansible-collections/community.general/pull/9599).
-- apache2_mod_proxy - remove unused parameter and code from ``Balancer`` constructor (https://github.com/ansible-collections/community.general/pull/9614).
-- apache2_mod_proxy - simplified and improved string manipulation (https://github.com/ansible-collections/community.general/pull/9614).
-- apache2_mod_proxy - use ``deps`` to handle dependencies (https://github.com/ansible-collections/community.general/pull/9612).
-- cgroup_memory_recap callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- chroot connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- cloud_init_data_facts - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
-- cobbler inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- context_demo callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- counter filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- counter_enabled callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- cpanm - enable usage of option ``--with-recommends`` (https://github.com/ansible-collections/community.general/issues/9554, https://github.com/ansible-collections/community.general/pull/9555).
-- cpanm - enable usage of option ``--with-suggests`` (https://github.com/ansible-collections/community.general/pull/9555).
-- crc32 filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- cronvar - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
-- crypttab - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
-- default_without_diff callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- dense callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- dict filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- dict_kv filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- diy callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- doas become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- dzdo become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- elastic callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- from_csv filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- from_ini filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- funcd connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- gitlab_runners inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- groupby_as_dict filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- hashids filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- icinga2 inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- incus connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- iocage connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- iocage inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- iocage inventory plugin - the new parameter ``sudo`` of the plugin lets the command ``iocage list -l`` to run as root on the iocage host. This is needed to get the IPv4 of a running DHCP jail (https://github.com/ansible-collections/community.general/issues/9572, https://github.com/ansible-collections/community.general/pull/9573).
-- iptables_state action plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- jabber callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- jail connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- jc filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- jira - transition operation now has ``status_id`` to directly reference wanted transition (https://github.com/ansible-collections/community.general/pull/9602).
-- json_query filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- keep_keys filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- keycloak_* modules - ``refresh_token`` parameter added. When multiple authentication parameters are provided (``token``, ``refresh_token``, and ``auth_username``/``auth_password``), modules will now automatically retry requests upon authentication errors (401), using in order the token, refresh token, and username/password (https://github.com/ansible-collections/community.general/pull/9494).
-- known_hosts - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
-- ksu become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- linode inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- lists filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- lists_mergeby filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- log_plays callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- loganalytics callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- logdna callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- logentries callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- logstash callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- lxc connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- lxd connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- lxd inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- machinectl become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- mail callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- memcached cache plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- nmap inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- nmcli - add a option ``fail_over_mac`` (https://github.com/ansible-collections/community.general/issues/9570, https://github.com/ansible-collections/community.general/pull/9571).
-- nrdp callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- null callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- one_template - adds ``filter`` option for retrieving templates which are not owned by the user (https://github.com/ansible-collections/community.general/pull/9547, https://github.com/ansible-collections/community.general/issues/9278).
-- online inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- opennebula inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- opentelemetry callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- parted - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
-- pbrun become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- pfexec become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- pickle cache plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- pmrun become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- proxmox - refactors the proxmox module (https://github.com/ansible-collections/community.general/pull/9225).
-- proxmox inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- proxmox_pct_remote connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- proxmox_template - add support for checksum validation with new options ``checksum_algorithm`` and ``checksum`` (https://github.com/ansible-collections/community.general/issues/9553, https://github.com/ansible-collections/community.general/pull/9601).
-- pulp_repo - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
-- qubes connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- random_mac filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- redfish_info - add command ``GetAccountServiceConfig`` to get full information about AccountService configuration (https://github.com/ansible-collections/community.general/pull/9403).
-- redhat_subscription - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
-- redis cache plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- remove_keys filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- replace_keys filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- reveal_ansible_type filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- run0 become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- saltstack connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- say callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- scaleway inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- selective callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- sesu become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- shutdown action plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- slack callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- snap - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9598).
-- snap_alias - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9598).
-- solaris_zone - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
-- sorcery - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
-- splunk callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- stackpath_compute inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- sudosu become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- sumologic callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- syslog_json callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- time filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- timestamp callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- timezone - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
-- to_ini filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- ufw - add support for ``vrrp`` protocol (https://github.com/ansible-collections/community.general/issues/9562, https://github.com/ansible-collections/community.general/pull/9582).
-- unicode_normalize filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- unixy callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- version_sort filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
-- virtualbox inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- xen_orchestra inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-- yaml cache plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- yaml callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
-- zone connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
-
-Deprecated Features
--------------------
-
-- MH module utils - attribute ``debug`` definition in subclasses of MH is now deprecated, as that name will become a delegation to ``AnsibleModule`` in community.general 12.0.0, and any such attribute will be overridden by that delegation in that version (https://github.com/ansible-collections/community.general/pull/9577).
-- proxmox - removes default value ``false`` of ``update`` parameter. This will be changed to a default of ``true`` in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/9225).
-
-Security Fixes
---------------
-
-- keycloak_client - Sanitize ``saml.encryption.private.key`` so it does not show in the logs (https://github.com/ansible-collections/community.general/pull/9621).
-
-Bugfixes
---------
-
-- homebrew - fix incorrect handling of homebrew modules when a tap is requested (https://github.com/ansible-collections/community.general/pull/9546, https://github.com/ansible-collections/community.general/issues/9533).
-- iocage inventory plugin - the plugin parses the IP4 tab of the jails list and puts the elements into the new variable ``iocage_ip4_dict``. In multiple interface format the variable ``iocage_ip4`` keeps the comma-separated list of IP4 (https://github.com/ansible-collections/community.general/issues/9538).
-- pipx - honor option ``global`` when ``state=latest`` (https://github.com/ansible-collections/community.general/pull/9623).
-- proxmox - fixes idempotency of template conversions (https://github.com/ansible-collections/community.general/pull/9225, https://github.com/ansible-collections/community.general/issues/8811).
-- proxmox - fixes incorrect parsing for bind-only mounts (https://github.com/ansible-collections/community.general/pull/9225, https://github.com/ansible-collections/community.general/issues/8982).
-- proxmox - fixes issues with disk_volume variable (https://github.com/ansible-collections/community.general/pull/9225, https://github.com/ansible-collections/community.general/issues/9065).
-- proxmox module utils - fixes ignoring of ``choose_first_if_multiple`` argument in ``get_vmid`` (https://github.com/ansible-collections/community.general/pull/9225).
-- redhat_subscription - do not try to unsubscribe (i.e. remove subscriptions)
- when unregistering a system: newer versions of subscription-manager, as
- available in EL 10 and Fedora 41+, do not support entitlements anymore, and
- thus unsubscribing will fail
- (https://github.com/ansible-collections/community.general/pull/9578).
-
-New Plugins
------------
-
-Connection
-~~~~~~~~~~
-
-- community.general.proxmox_pct_remote - Run tasks in Proxmox LXC container instances using pct CLI via SSH.
-
-Filter
-~~~~~~
-
-- community.general.json_diff - Create a JSON patch by comparing two JSON files.
-- community.general.json_patch - Apply a JSON-Patch (RFC 6902) operation to an object.
-- community.general.json_patch_recipe - Apply JSON-Patch (RFC 6902) operations to an object.
-
-Lookup
-~~~~~~
-
-- community.general.onepassword_ssh_key - Fetch SSH keys stored in 1Password.
-
-New Modules
------------
-
-- community.general.proxmox_backup_info - Retrieve information on Proxmox scheduled backups.
-
-v10.2.0
-=======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- bitwarden lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- cgroup_memory_recap callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- chef_databag lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- chroot connection plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- chroot connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
-- cobbler inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- cobbler inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- collection_version lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- consul_kv lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- context_demo callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- counter_enabled callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- credstash lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- cyberarkpassword lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- cyberarkpassword lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- dense callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- dependent lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- dig lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- dig lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- diy callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- dnstxt lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- dnstxt lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- doas become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- dsv lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- dzdo become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- elastic callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- etcd lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- etcd3 lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- etcd3 lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- filetree lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- from_csv filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- from_ini filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- funcd connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
-- github_app_access_token lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- gitlab_instance_variable - add support for ``raw`` variables suboption (https://github.com/ansible-collections/community.general/pull/9425).
-- gitlab_runners inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- gitlab_runners inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- hiera lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- icinga2 inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- incus connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
-- iocage connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
-- iocage inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- iptables_state action plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9318).
-- jabber callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- jail connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
-- keycloak - add an action group for Keycloak modules to allow ``module_defaults`` to be set for Keycloak tasks (https://github.com/ansible-collections/community.general/pull/9284).
-- keyring lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- ksu become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- lastpass lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- linode inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- lmdb_kv lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- lmdb_kv lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- locale_gen - invert the logic to determine ``ubuntu_mode``, making it look first for ``/etc/locale.gen`` (set ``ubuntu_mode`` to ``False``) and only then looking for ``/var/lib/locales/supported.d/`` (set ``ubuntu_mode`` to ``True``) (https://github.com/ansible-collections/community.general/pull/9238, https://github.com/ansible-collections/community.general/issues/9131, https://github.com/ansible-collections/community.general/issues/8487).
-- locale_gen - new return value ``mechanism`` to better express the semantics of the ``ubuntu_mode``, with the possible values being either ``glibc`` (``ubuntu_mode=False``) or ``ubuntu_legacy`` (``ubuntu_mode=True``) (https://github.com/ansible-collections/community.general/pull/9238).
-- log_plays callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- loganalytics callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- logdna callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- logentries callback plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- logentries callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- lxc connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
-- lxd connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
-- lxd inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- lxd inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- machinectl become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- mail callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- manageiq_alert_profiles - improve handling of parameter requirements (https://github.com/ansible-collections/community.general/pull/9449).
-- manifold lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- manifold lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- memcached cache plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9320).
-- merge_variables lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- nmap inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- nmap inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- nrdp callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- onepassword lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- onepassword lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- onepassword_doc lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- online inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- opennebula inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- opennebula inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- opentelemetry callback plugin - remove code handling Python versions prior to 3.7 (https://github.com/ansible-collections/community.general/pull/9482).
-- opentelemetry callback plugin - remove code handling Python versions prior to 3.7 (https://github.com/ansible-collections/community.general/pull/9503).
-- opentelemetry callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- pacemaker_cluster - remove unused code (https://github.com/ansible-collections/community.general/pull/9471).
-- pacemaker_cluster - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/9471).
-- passwordstore lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- pbrun become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- pfexec become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- pmrun become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- proxmox inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- proxmox inventory plugin - strip whitespace from ``user``, ``token_id``, and ``token_secret`` (https://github.com/ansible-collections/community.general/issues/9227, https://github.com/ansible-collections/community.general/pull/9228/).
-- proxmox inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- proxmox module utils - add method ``api_task_complete`` that can wait for task completion and return error message (https://github.com/ansible-collections/community.general/pull/9256).
-- proxmox_backup - refactor permission checking to improve code readability and maintainability (https://github.com/ansible-collections/community.general/pull/9239).
-- qubes connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
-- random_pet lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- redis cache plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- redis cache plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9320).
-- redis lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- revbitspss lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- saltstack connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
-- say callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- scaleway inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- scaleway inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- selective callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- sesu become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- shelvefile lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- shutdown action plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- shutdown action plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9318).
-- slack callback plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- slack callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- splunk callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- stackpath_compute inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- sudosu become plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9319).
-- timestamp callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- to_ini filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- tss lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- tss lookup plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
-- unixy callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- virtualbox inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
-- virtualbox inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- xbps - add ``root`` and ``repository`` options to enable bootstrapping new void installations (https://github.com/ansible-collections/community.general/pull/9174).
-- xen_orchestra inventory plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
-- xfconf - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9226).
-- xfconf_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9226).
-- yaml callback plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
-- zone connection plugin - use f-strings instead of interpolations or ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
-- zypper - add ``quiet`` option (https://github.com/ansible-collections/community.general/pull/9270).
-- zypper - add ``simple_errors`` option (https://github.com/ansible-collections/community.general/pull/9270).
-
-Deprecated Features
--------------------
-
-- atomic_container - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487).
-- atomic_host - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487).
-- atomic_image - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487).
-- facter - module is deprecated and will be removed in community.general 12.0.0, use ``community.general.facter_facts`` instead (https://github.com/ansible-collections/community.general/pull/9451).
-- locale_gen - ``ubuntu_mode=True``, or ``mechanism=ubuntu_legacy`` is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9238).
-- pure module utils - the module utils is deprecated and will be removed from community.general 12.0.0. The modules using this were removed in community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/9432).
-- purestorage doc fragments - the doc fragment is deprecated and will be removed from community.general 12.0.0. The modules using this were removed in community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/9432).
-- sensu_check - module is deprecated and will be removed in community.general 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483).
-- sensu_client - module is deprecated and will be removed in community.general 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483).
-- sensu_handler - module is deprecated and will be removed in community.general 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483).
-- sensu_silence - module is deprecated and will be removed in community.general 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483).
-- sensu_subscription - module is deprecated and will be removed in community.general 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483).
-- slack - the default value ``auto`` of the ``prepend_hash`` option is deprecated and will change to ``never`` in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/9443).
-- yaml callback plugin - deprecate plugin in favor of ``result_format=yaml`` in plugin ``ansible.builtin.default`` (https://github.com/ansible-collections/community.general/pull/9456).
-
-Security Fixes
---------------
-
-- keycloak_authentication - API calls did not properly set the ``priority`` during update resulting in incorrectly sorted authentication flows. This apparently only affects Keycloak 25 or newer (https://github.com/ansible-collections/community.general/pull/9263).
-
-Bugfixes
---------
-
-- dig lookup plugin - correctly handle ``NoNameserver`` exception (https://github.com/ansible-collections/community.general/pull/9363, https://github.com/ansible-collections/community.general/issues/9362).
-- homebrew - fix incorrect handling of aliased homebrew modules when the alias is requested (https://github.com/ansible-collections/community.general/pull/9255, https://github.com/ansible-collections/community.general/issues/9240).
-- htpasswd - report changes when file permissions are adjusted (https://github.com/ansible-collections/community.general/issues/9485, https://github.com/ansible-collections/community.general/pull/9490).
-- proxmox_backup - fix incorrect key lookup in vmid permission check (https://github.com/ansible-collections/community.general/pull/9223).
-- proxmox_disk - fix async method and make ``resize_disk`` method handle errors correctly (https://github.com/ansible-collections/community.general/pull/9256).
-- proxmox_template - fix the wrong path called on ``proxmox_template.task_status`` (https://github.com/ansible-collections/community.general/issues/9276, https://github.com/ansible-collections/community.general/pull/9277).
-- qubes connection plugin - fix the printing of debug information (https://github.com/ansible-collections/community.general/pull/9334).
-- redfish_utils module utils - Fix ``VerifyBiosAttributes`` command on multi system resource nodes (https://github.com/ansible-collections/community.general/pull/9234).
-
-New Plugins
------------
-
-Inventory
-~~~~~~~~~
-
-- community.general.iocage - iocage inventory source.
-
-New Modules
------------
-
-- community.general.android_sdk - Manages Android SDK packages.
-- community.general.ldap_inc - Use the Modify-Increment LDAP V3 feature to increment an attribute value.
-- community.general.systemd_creds_decrypt - C(systemd)'s C(systemd-creds decrypt) plugin.
-- community.general.systemd_creds_encrypt - C(systemd)'s C(systemd-creds encrypt) plugin.
-
-v10.1.0
-=======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- alternatives - add ``family`` parameter that allows to utilize the ``--family`` option available in RedHat version of update-alternatives (https://github.com/ansible-collections/community.general/issues/5060, https://github.com/ansible-collections/community.general/pull/9096).
-- cloudflare_dns - add support for ``comment`` and ``tags`` (https://github.com/ansible-collections/community.general/pull/9132).
-- deps module utils - add ``deps.clear()`` to clear out previously declared dependencies (https://github.com/ansible-collections/community.general/pull/9179).
-- homebrew - greatly speed up module when multiple packages are passed in the ``name`` option (https://github.com/ansible-collections/community.general/pull/9181).
-- homebrew - remove duplicated package name validation (https://github.com/ansible-collections/community.general/pull/9076).
-- iso_extract - adds ``password`` parameter that is passed to 7z (https://github.com/ansible-collections/community.general/pull/9159).
-- launchd - add ``plist`` option for services such as sshd, where the plist filename doesn't match the service name (https://github.com/ansible-collections/community.general/pull/9102).
-- nmcli - add ``sriov`` parameter that enables support for SR-IOV settings (https://github.com/ansible-collections/community.general/pull/9168).
-- pipx - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9180).
-- pipx_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9180).
-- proxmox_template - add server side artifact fetching support (https://github.com/ansible-collections/community.general/pull/9113).
-- redfish_command - add ``update_custom_oem_header``, ``update_custom_oem_params``, and ``update_custom_oem_mime_type`` options (https://github.com/ansible-collections/community.general/pull/9123).
-- redfish_utils module utils - remove redundant code (https://github.com/ansible-collections/community.general/pull/9190).
-- rpm_ostree_pkg - added the option ``apply_live`` (https://github.com/ansible-collections/community.general/pull/9167).
-- rpm_ostree_pkg - added the return value ``needs_reboot`` (https://github.com/ansible-collections/community.general/pull/9167).
-- scaleway_lb - minor simplification in the code (https://github.com/ansible-collections/community.general/pull/9189).
-- ssh_config - add ``dynamicforward`` option (https://github.com/ansible-collections/community.general/pull/9192).
-
-Deprecated Features
--------------------
-
-- opkg - deprecate value ``""`` for parameter ``force`` (https://github.com/ansible-collections/community.general/pull/9172).
-- redfish_utils module utils - deprecate method ``RedfishUtils._init_session()`` (https://github.com/ansible-collections/community.general/pull/9190).
-
-Bugfixes
---------
-
-- dnf_config_manager - fix hanging when prompting to import GPG keys (https://github.com/ansible-collections/community.general/pull/9124, https://github.com/ansible-collections/community.general/issues/8830).
-- dnf_config_manager - forces locale to ``C`` before module starts. If the locale was set to non-English, the output of the ``dnf config-manager`` could not be parsed (https://github.com/ansible-collections/community.general/pull/9157, https://github.com/ansible-collections/community.general/issues/9046).
-- flatpak - force the locale language to ``C`` when running the flatpak command (https://github.com/ansible-collections/community.general/pull/9187, https://github.com/ansible-collections/community.general/issues/8883).
-- gio_mime - fix command line when determining version of ``gio`` (https://github.com/ansible-collections/community.general/pull/9171, https://github.com/ansible-collections/community.general/issues/9158).
-- github_key - in check mode, a faulty call to ``datetime.strftime(...)`` was being made which generated an exception (https://github.com/ansible-collections/community.general/issues/9185).
-- homebrew_cask - allow ``+`` symbol in Homebrew cask name validation regex (https://github.com/ansible-collections/community.general/pull/9128).
-- keycloak_clientscope_type - sort the default and optional clientscope lists to improve the diff (https://github.com/ansible-collections/community.general/pull/9202).
-- slack - fail if Slack API response is not OK with error message (https://github.com/ansible-collections/community.general/pull/9198).
-
-New Plugins
------------
-
-Filter
-~~~~~~
-
-- community.general.accumulate - Produce a list of accumulated sums of the input list contents.
-
-New Modules
------------
-
-- community.general.decompress - Decompresses compressed files.
-- community.general.proxmox_backup - Start a VM backup in Proxmox VE cluster.
-
-v10.0.1
-=======
-
-Release Summary
----------------
-
-Bugfix release for inclusion in Ansible 11.0.0rc1.
-
-Bugfixes
---------
-
-- keycloak_client - fix diff by removing code that turns the attributes dict which contains additional settings into a list (https://github.com/ansible-collections/community.general/pull/9077).
-- keycloak_clientscope - fix diff and ``end_state`` by removing the code that turns the attributes dict, which contains additional config items, into a list (https://github.com/ansible-collections/community.general/pull/9082).
-- redfish_utils module utils - remove undocumented default applytime (https://github.com/ansible-collections/community.general/pull/9114).
-
-v10.0.0
-=======
-
-Release Summary
----------------
-
-This is release 10.0.0 of ``community.general``, released on 2024-11-04.
-
-Minor Changes
--------------
-
-- CmdRunner module util - argument formats can be specified as plain functions without calling ``cmd_runner_fmt.as_func()`` (https://github.com/ansible-collections/community.general/pull/8479).
-- CmdRunner module utils - the parameter ``force_lang`` now supports the special value ``auto`` which will automatically try and determine the best parsable locale in the system (https://github.com/ansible-collections/community.general/pull/8517).
-- MH module utils - add parameter ``when`` to ``cause_changes`` decorator (https://github.com/ansible-collections/community.general/pull/8766).
-- MH module utils - minor refactor in decorators (https://github.com/ansible-collections/community.general/pull/8766).
-- alternatives - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- ansible_galaxy_install - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9060).
-- ansible_galaxy_install - add upgrade feature (https://github.com/ansible-collections/community.general/pull/8431, https://github.com/ansible-collections/community.general/issues/8351).
-- ansible_galaxy_install - minor refactor in the module (https://github.com/ansible-collections/community.general/pull/8413).
-- apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- cargo - add option ``directory``, which allows source directory to be specified (https://github.com/ansible-collections/community.general/pull/8480).
-- cgroup_memory_recap, hipchat, jabber, log_plays, loganalytics, logentries, logstash, slack, splunk, sumologic, syslog_json callback plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8628).
-- chef_databag, consul_kv, cyberarkpassword, dsv, etcd, filetree, hiera, onepassword, onepassword_doc, onepassword_raw, passwordstore, redis, shelvefile, tss lookup plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8626).
-- chroot, funcd, incus, iocage, jail, lxc, lxd, qubes, zone connection plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8627).
-- cmd_runner module utils - add decorator ``cmd_runner_fmt.stack`` (https://github.com/ansible-collections/community.general/pull/8415).
-- cmd_runner module utils - refactor argument formatting code to its own Python module (https://github.com/ansible-collections/community.general/pull/8964).
-- cmd_runner_fmt module utils - simplify implementation of ``cmd_runner_fmt.as_bool_not()`` (https://github.com/ansible-collections/community.general/pull/8512).
-- cobbler, linode, lxd, nmap, online, scaleway, stackpath_compute, virtualbox inventory plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8625).
-- consul_acl - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- consul_kv - add argument for the datacenter option on Consul API (https://github.com/ansible-collections/community.general/pull/9026).
-- copr - Added ``includepkgs`` and ``excludepkgs`` parameters to limit the list of packages fetched or excluded from the repository (https://github.com/ansible-collections/community.general/pull/8779).
-- cpanm - add return value ``cpanm_version`` (https://github.com/ansible-collections/community.general/pull/9061).
-- credstash lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- csv module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- deco MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- dig lookup plugin - add ``port`` option to specify DNS server port (https://github.com/ansible-collections/community.general/pull/8966).
-- django module utils - always retrieve version (https://github.com/ansible-collections/community.general/pull/9063).
-- django_check - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063).
-- django_command - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063).
-- django_createcachetable - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063).
-- doas, dzdo, ksu, machinectl, pbrun, pfexec, pmrun, sesu, sudosu become plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8623).
-- etcd3 - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- flatpak - improve the parsing of Flatpak application IDs based on official guidelines (https://github.com/ansible-collections/community.general/pull/8909).
-- gconftool2 - make use of ``ModuleHelper`` features to simplify code (https://github.com/ansible-collections/community.general/pull/8711).
-- gconftool2 - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064).
-- gconftool2 module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9064).
-- gconftool2_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064).
-- gio_mime - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9067).
-- gio_mime - adjust code ahead of the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8855).
-- gio_mime - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776).
-- gio_mime module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9067).
-- github_app_access_token lookup plugin - adds new ``private_key`` parameter (https://github.com/ansible-collections/community.general/pull/8989).
-- gitlab_deploy_key - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_group - add many new parameters (https://github.com/ansible-collections/community.general/pull/8908).
-- gitlab_group - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- gitlab_issue - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_merge_request - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- gitlab_project - add option ``container_expiration_policy`` to schedule container registry cleanup (https://github.com/ansible-collections/community.general/pull/8674).
-- gitlab_project - add option ``issues_access_level`` to enable/disable project issues (https://github.com/ansible-collections/community.general/pull/8760).
-- gitlab_project - add option ``model_registry_access_level`` to disable model registry (https://github.com/ansible-collections/community.general/pull/8688).
-- gitlab_project - add option ``pages_access_level`` to disable project pages (https://github.com/ansible-collections/community.general/pull/8688).
-- gitlab_project - add option ``repository_access_level`` to disable project repository (https://github.com/ansible-collections/community.general/pull/8674).
-- gitlab_project - add option ``service_desk_enabled`` to disable service desk (https://github.com/ansible-collections/community.general/pull/8688).
-- gitlab_project - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- gitlab_project - sorted parameters in order to avoid future merge conflicts (https://github.com/ansible-collections/community.general/pull/8759).
-- gitlab_runner - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- hashids filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- homebrew - speed up brew install and upgrade (https://github.com/ansible-collections/community.general/pull/9022).
-- hwc_ecs_instance - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_evs_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_eip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_peering_connect - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_port - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- hwc_vpc_subnet - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- icinga2_host - replace loop with dict comprehension (https://github.com/ansible-collections/community.general/pull/8876).
-- imc_rest - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- ipa_dnsrecord - adds ``SSHFP`` record type for managing SSH fingerprints in FreeIPA DNS (https://github.com/ansible-collections/community.general/pull/8404).
-- ipa_otptoken - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- jenkins_node - add ``offline_message`` parameter for updating a Jenkins node offline cause reason when the state is "disabled" (offline) (https://github.com/ansible-collections/community.general/pull/9084).
-- jira - adjust code ahead of the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8856).
-- jira - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776).
-- jira - replace deprecated params when using decorator ``cause_changes`` (https://github.com/ansible-collections/community.general/pull/8791).
-- keep_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- keycloak_client - add ``client-x509`` choice to ``client_authenticator_type`` (https://github.com/ansible-collections/community.general/pull/8973).
-- keycloak_client - assign auth flow by name (https://github.com/ansible-collections/community.general/pull/8428).
-- keycloak_client - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak_clientscope - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak_identity_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak_realm - add boolean toggle to configure organization support for a given keycloak realm (https://github.com/ansible-collections/community.general/issues/9027, https://github.com/ansible-collections/community.general/pull/8927/).
-- keycloak_user_federation - add module argument allowing users to optout of the removal of unspecified mappers, for example to keep the keycloak default mappers (https://github.com/ansible-collections/community.general/pull/8764).
-- keycloak_user_federation - add the user federation config parameter ``referral`` to the module arguments (https://github.com/ansible-collections/community.general/pull/8954).
-- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- keycloak_user_federation - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- linode - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- locale_gen - add support for multiple locales (https://github.com/ansible-collections/community.general/issues/8677, https://github.com/ansible-collections/community.general/pull/8682).
-- lxc_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- lxd_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- manageiq_provider - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- mattermost - adds support for message priority (https://github.com/ansible-collections/community.general/issues/9068, https://github.com/ansible-collections/community.general/pull/9087).
-- memcached, pickle, redis, yaml cache plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8624).
-- memset_dns_reload - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- memset_memstore_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- memset_server_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- memset_zone - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- memset_zone_domain - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- memset_zone_record - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- nmcli - add ``conn_enable`` param to reload connection (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/pull/8897).
-- nmcli - add ``state=up`` and ``state=down`` to enable/disable connections (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/issues/7152, https://github.com/ansible-collections/community.general/pull/8897).
-- nmcli - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- npm - add ``force`` parameter to allow ``--force`` (https://github.com/ansible-collections/community.general/pull/8885).
-- ocapi_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- one_image - add ``create``, ``template`` and ``datastore_id`` arguments for image creation (https://github.com/ansible-collections/community.general/pull/9075).
-- one_image - add ``wait_timeout`` argument for adjustable timeouts (https://github.com/ansible-collections/community.general/pull/9075).
-- one_image - add option ``persistent`` to manage image persistence (https://github.com/ansible-collections/community.general/issues/3578, https://github.com/ansible-collections/community.general/pull/8889).
-- one_image - extend xsd scheme to make it return a lot more info about image (https://github.com/ansible-collections/community.general/pull/8889).
-- one_image - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889).
-- one_image_info - extend xsd scheme to make it return a lot more info about image (https://github.com/ansible-collections/community.general/pull/8889).
-- one_image_info - refactor code to make it more similar to ``one_template`` and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889).
-- one_service - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- one_vm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- onepassword lookup plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- open_iscsi - allow login to a portal with multiple targets without specifying any of them (https://github.com/ansible-collections/community.general/pull/8719).
-- openbsd_pkg - adds diff support to show changes in installed package list. This does not yet work for check mode (https://github.com/ansible-collections/community.general/pull/8402).
-- opennebula.py - add VM ``id`` and VM ``host`` to inventory host data (https://github.com/ansible-collections/community.general/pull/8532).
-- opentelemetry callback plugin - fix default value for ``store_spans_in_file`` causing traces to be produced to a file named ``None`` (https://github.com/ansible-collections/community.general/issues/8566, https://github.com/ansible-collections/community.general/pull/8741).
-- opkg - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9086).
-- passwordstore lookup plugin - add subkey creation/update support (https://github.com/ansible-collections/community.general/pull/8952).
-- passwordstore lookup plugin - add the current user to the lockfile file name to address issues on multi-user systems (https://github.com/ansible-collections/community.general/pull/8689).
-- pids - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- pipx - add parameter ``suffix`` to module (https://github.com/ansible-collections/community.general/pull/8675, https://github.com/ansible-collections/community.general/issues/8656).
-- pipx - added new states ``install_all``, ``uninject``, ``upgrade_shared``, ``pin``, and ``unpin`` (https://github.com/ansible-collections/community.general/pull/8809).
-- pipx - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793).
-- pipx - refactor out parsing of ``pipx list`` output to module utils (https://github.com/ansible-collections/community.general/pull/9044).
-- pipx - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- pipx_info - add new return value ``pinned`` (https://github.com/ansible-collections/community.general/pull/9044).
-- pipx_info - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793).
-- pipx_info - refactor out parsing of ``pipx list`` output to module utils (https://github.com/ansible-collections/community.general/pull/9044).
-- pipx_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- pkg5_publisher - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- pkgng - add option ``use_globs`` (default ``true``) to optionally disable glob patterns (https://github.com/ansible-collections/community.general/issues/8632, https://github.com/ansible-collections/community.general/pull/8633).
-- proxmox - add ``disk_volume`` and ``mount_volumes`` keys for better readability (https://github.com/ansible-collections/community.general/pull/8542).
-- proxmox - allow specification of the API port when using proxmox_* (https://github.com/ansible-collections/community.general/issues/8440, https://github.com/ansible-collections/community.general/pull/8441).
-- proxmox - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- proxmox - translate the old ``disk`` and ``mounts`` keys to the new handling internally (https://github.com/ansible-collections/community.general/pull/8542).
-- proxmox inventory plugin - add new fact for LXC interface details (https://github.com/ansible-collections/community.general/pull/8713).
-- proxmox inventory plugin - clean up authentication code (https://github.com/ansible-collections/community.general/pull/8917).
-- proxmox inventory plugin - fix urllib3 ``InsecureRequestWarnings`` not being suppressed when a token is used (https://github.com/ansible-collections/community.general/pull/9099).
-- proxmox_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- proxmox_kvm - adds the ``ciupgrade`` parameter to specify whether cloud-init should upgrade system packages at first boot (https://github.com/ansible-collections/community.general/pull/9066).
-- proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- proxmox_template - small refactor in logic for determining whether a template exists or not (https://github.com/ansible-collections/community.general/pull/8516).
-- proxmox_vm_info - add ``network`` option to retrieve current network information (https://github.com/ansible-collections/community.general/pull/8471).
-- redfish_* modules - adds ``ciphers`` option for custom cipher selection (https://github.com/ansible-collections/community.general/pull/8533).
-- redfish_command - add ``UpdateUserAccountTypes`` command (https://github.com/ansible-collections/community.general/issues/9058, https://github.com/ansible-collections/community.general/pull/9059).
-- redfish_command - add ``wait`` and ``wait_timeout`` options to allow a user to block a command until a service is accessible after performing the requested command (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434).
-- redfish_command - add handling of the ``PasswordChangeRequired`` message from services in the ``UpdateUserPassword`` command to directly modify the user's password if the requested user is the one invoking the operation (https://github.com/ansible-collections/community.general/issues/8652, https://github.com/ansible-collections/community.general/pull/8653).
-- redfish_config - remove ``CapacityBytes`` from required parameters of the ``CreateVolume`` command (https://github.com/ansible-collections/community.general/pull/8956).
-- redfish_config - add parameter ``storage_none_volume_deletion`` to ``CreateVolume`` command in order to control the automatic deletion of non-RAID volumes (https://github.com/ansible-collections/community.general/pull/8990).
-- redfish_info - add command ``CheckAvailability`` to check if a service is accessible (https://github.com/ansible-collections/community.general/issues/8051, https://github.com/ansible-collections/community.general/pull/8434).
-- redfish_info - adds ``RedfishURI`` and ``StorageId`` to Disk inventory (https://github.com/ansible-collections/community.general/pull/8937).
-- redfish_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- redfish_utils module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- redfish_utils module utils - schedule a BIOS configuration job at next reboot when the BIOS config is changed (https://github.com/ansible-collections/community.general/pull/9012).
-- redis cache plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- redis, redis_info - add ``client_cert`` and ``client_key`` options to specify path to certificate for Redis authentication (https://github.com/ansible-collections/community.general/pull/8654).
-- redis_info - adds support for getting cluster info (https://github.com/ansible-collections/community.general/pull/8464).
-- remove_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- replace_keys filter plugin - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- scaleway - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- scaleway module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- scaleway_compute - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- scaleway_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_container_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_container_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_container_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_container_registry - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_container_registry_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_function - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_function_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_function_namespace - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_function_namespace_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
-- scaleway_ip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- scaleway_lb - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- scaleway_security_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- scaleway_user_data - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- scaleway_user_data - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- sensu_silence - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- snmp_facts - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- sorcery - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
-- sudosu become plugin - added an option (``alt_method``) to enhance compatibility with more versions of ``su`` (https://github.com/ansible-collections/community.general/pull/8214).
-- udm_dns_record - replace loop with ``dict.update()`` (https://github.com/ansible-collections/community.general/pull/8876).
-- ufw - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- unsafe plugin utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- vardict module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- vars MH module utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
-- virtualbox inventory plugin - expose a new parameter ``enable_advanced_group_parsing`` to change how the VirtualBox dynamic inventory parses VM groups (https://github.com/ansible-collections/community.general/issues/8508, https://github.com/ansible-collections/community.general/pull/8510).
-- vmadm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
-- wdc_redfish_command - minor change to handle upgrade file for Redfish WD platforms (https://github.com/ansible-collections/community.general/pull/8444).
-
-Breaking Changes / Porting Guide
---------------------------------
-
-- The collection no longer supports ansible-core 2.13 and ansible-core 2.14. While most (or even all) modules and plugins might still work with these versions, they are no longer tested in CI and breakages regarding them will not be fixed (https://github.com/ansible-collections/community.general/pull/8921).
-- cmd_runner module utils - CLI arguments created directly from module parameters are no longer assigned a default formatter (https://github.com/ansible-collections/community.general/pull/8928).
-- irc - the defaults of ``use_tls`` and ``validate_certs`` changed from ``false`` to ``true`` (https://github.com/ansible-collections/community.general/pull/8918).
-- rhsm_repository - the states ``present`` and ``absent`` have been removed. Use ``enabled`` and ``disabled`` instead (https://github.com/ansible-collections/community.general/pull/8918).
-
-Deprecated Features
--------------------
-
-- CmdRunner module util - setting the value of the ``ignore_none`` parameter within a ``CmdRunner`` context is deprecated and that feature should be removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/8479).
-- MH decorator cause_changes module utils - deprecate parameters ``on_success`` and ``on_failure`` (https://github.com/ansible-collections/community.general/pull/8791).
-- git_config - the ``list_all`` option has been deprecated and will be removed in community.general 11.0.0. Use the ``community.general.git_config_info`` module instead (https://github.com/ansible-collections/community.general/pull/8453).
-- git_config - using ``state=present`` without providing ``value`` is deprecated and will be disallowed in community.general 11.0.0. Use the ``community.general.git_config_info`` module instead to read a value (https://github.com/ansible-collections/community.general/pull/8453).
-- hipchat - the hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. The module is therefore deprecated and will be removed from community.general 11.0.0 if nobody provides compelling reasons to still keep it (https://github.com/ansible-collections/community.general/pull/8919).
-- pipx - support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8793).
-- pipx_info - support for versions of the command line tool ``pipx`` older than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/8793).
-
-Removed Features (previously deprecated)
-----------------------------------------
-
-- The consul_acl module has been removed. Use community.general.consul_token and/or community.general.consul_policy instead (https://github.com/ansible-collections/community.general/pull/8921).
-- The hipchat callback plugin has been removed. The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020 (https://github.com/ansible-collections/community.general/pull/8921).
-- The redhat module utils has been removed (https://github.com/ansible-collections/community.general/pull/8921).
-- The rhn_channel module has been removed (https://github.com/ansible-collections/community.general/pull/8921).
-- The rhn_register module has been removed (https://github.com/ansible-collections/community.general/pull/8921).
-- consul - removed the ``ack_params_state_absent`` option. It had no effect anymore (https://github.com/ansible-collections/community.general/pull/8918).
-- ejabberd_user - removed the ``logging`` option (https://github.com/ansible-collections/community.general/pull/8918).
-- gitlab modules - remove basic auth feature (https://github.com/ansible-collections/community.general/pull/8405).
-- proxmox_kvm - removed the ``proxmox_default_behavior`` option. Explicitly specify the old default values if you were using ``proxmox_default_behavior=compatibility``, otherwise simply remove it (https://github.com/ansible-collections/community.general/pull/8918).
-- redhat_subscriptions - removed the ``pool`` option. Use ``pool_ids`` instead (https://github.com/ansible-collections/community.general/pull/8918).
-
-Bugfixes
---------
-
-- bitwarden lookup plugin - fix ``KeyError`` in ``search_field`` (https://github.com/ansible-collections/community.general/issues/8549, https://github.com/ansible-collections/community.general/pull/8557).
-- bitwarden lookup plugin - support BWS v0.3.0 syntax breaking change (https://github.com/ansible-collections/community.general/pull/9028).
-- cloudflare_dns - fix changing Cloudflare SRV records (https://github.com/ansible-collections/community.general/issues/8679, https://github.com/ansible-collections/community.general/pull/8948).
-- cmd_runner module utils - call to ``get_best_parsable_locales()`` was missing parameter (https://github.com/ansible-collections/community.general/pull/8929).
-- collection_version lookup plugin - use ``importlib`` directly instead of the deprecated and in ansible-core 2.19 removed ``ansible.module_utils.compat.importlib`` (https://github.com/ansible-collections/community.general/pull/9084).
-- cpanm - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- dig lookup plugin - fix using only the last nameserver specified (https://github.com/ansible-collections/community.general/pull/8970).
-- django module utils - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- django_command - option ``command`` is now split lexically before passed to underlying PythonRunner (https://github.com/ansible-collections/community.general/pull/8944).
-- gconftool2_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- git_config - fix behavior of ``state=absent`` if ``value`` is present (https://github.com/ansible-collections/community.general/issues/8436, https://github.com/ansible-collections/community.general/pull/8452).
-- gitlab_group_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796).
-- gitlab_label - update label's color (https://github.com/ansible-collections/community.general/pull/9010).
-- gitlab_project - fix ``container_expiration_policy`` not being applied when creating a new project (https://github.com/ansible-collections/community.general/pull/8790).
-- gitlab_project - fix crash caused by old Gitlab projects not having a ``container_expiration_policy`` attribute (https://github.com/ansible-collections/community.general/pull/8790).
-- gitlab_project_access_token - fix crash in check mode caused by attempted access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796).
-- gitlab_runner - fix ``paused`` parameter being ignored (https://github.com/ansible-collections/community.general/pull/8648).
-- homebrew - do not fail when brew prints warnings (https://github.com/ansible-collections/community.general/pull/8406, https://github.com/ansible-collections/community.general/issues/7044).
-- homebrew_cask - fix ``upgrade_all`` returns ``changed`` when nothing upgraded (https://github.com/ansible-collections/community.general/issues/8707, https://github.com/ansible-collections/community.general/pull/8708).
-- homectl - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4691, https://github.com/ansible-collections/community.general/pull/8987).
-- hponcfg - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- ini_file - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-- ipa_host - add ``force_create``, fix ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1094, https://github.com/ansible-collections/community.general/pull/8920).
-- ipa_hostgroup - fix ``enabled `` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/8408, https://github.com/ansible-collections/community.general/pull/8900).
-- java_keystore - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-- jenkins_node - fixed ``enabled``, ``disable`` and ``absent`` node state redirect authorization issues, same as was present for ``present`` (https://github.com/ansible-collections/community.general/pull/9084).
-- jenkins_plugin - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-- kdeconfig - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-- kernel_blacklist - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- keycloak_client - fix TypeError when sanitizing the ``saml.signing.private.key`` attribute in the module's diff or state output. The ``sanitize_cr`` function expected a dict where in some cases a list might occur (https://github.com/ansible-collections/community.general/pull/8403).
-- keycloak_clientscope - remove IDs from clientscope and its protocol mappers on comparison for changed check (https://github.com/ansible-collections/community.general/pull/8545).
-- keycloak_clientscope_type - fix detect changes in check mode (https://github.com/ansible-collections/community.general/issues/9092, https://github.com/ansible-collections/community.general/pull/9093).
-- keycloak_group - fix crash caused in subgroup creation. The crash was caused by a missing or empty ``subGroups`` property in Keycloak ≥23 (https://github.com/ansible-collections/community.general/issues/8788, https://github.com/ansible-collections/community.general/pull/8979).
-- keycloak_realm - add normalizations for ``attributes`` and ``protocol_mappers`` (https://github.com/ansible-collections/community.general/pull/8496).
-- keycloak_realm - fix change detection in check mode by sorting the lists in the realms beforehand (https://github.com/ansible-collections/community.general/pull/8877).
-- keycloak_realm_key - fix invalid usage of ``parent_id`` (https://github.com/ansible-collections/community.general/issues/7850, https://github.com/ansible-collections/community.general/pull/8823).
-- keycloak_user_federation - add module argument allowing users to configure the update mode for the parameter ``bindCredential`` (https://github.com/ansible-collections/community.general/pull/8898).
-- keycloak_user_federation - fix key error when removing mappers during an update and new mappers are specified in the module args (https://github.com/ansible-collections/community.general/pull/8762).
-- keycloak_user_federation - fix the ``UnboundLocalError`` that occurs when an ID is provided for a user federation mapper (https://github.com/ansible-collections/community.general/pull/8831).
-- keycloak_user_federation - get cleartext IDP ``clientSecret`` from full realm info to detect changes to it (https://github.com/ansible-collections/community.general/issues/8294, https://github.com/ansible-collections/community.general/pull/8735).
-- keycloak_user_federation - minimize change detection by setting ``krbPrincipalAttribute`` to ``''`` in Keycloak responses if missing (https://github.com/ansible-collections/community.general/pull/8785).
-- keycloak_user_federation - remove ``lastSync`` parameter from Keycloak responses to minimize diff/changes (https://github.com/ansible-collections/community.general/pull/8812).
-- keycloak_user_federation - remove existing user federation mappers if they are not present in the federation configuration and will not be updated (https://github.com/ansible-collections/community.general/issues/7169, https://github.com/ansible-collections/community.general/pull/8695).
-- keycloak_user_federation - sort desired and after mapper list by name (analog to before mapper list) to minimize diff and make change detection more accurate (https://github.com/ansible-collections/community.general/pull/8761).
-- keycloak_userprofile - fix empty response when fetching userprofile component by removing ``parent=parent_id`` filter (https://github.com/ansible-collections/community.general/pull/8923).
-- keycloak_userprofile - improve diff by deserializing the fetched ``kc.user.profile.config`` and serialize it only when sending back (https://github.com/ansible-collections/community.general/pull/8940).
-- launched - correctly report changed status in check mode (https://github.com/ansible-collections/community.general/pull/8406).
-- locale_gen - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- lxd_container - fix bug introduced in previous commit (https://github.com/ansible-collections/community.general/pull/8895, https://github.com/ansible-collections/community.general/issues/8888).
-- mksysb - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- modprobe - fix check mode not being honored for ``persistent`` option (https://github.com/ansible-collections/community.general/issues/9051, https://github.com/ansible-collections/community.general/pull/9052).
-- nsupdate - fix 'index out of range' error when changing NS records by falling back to authority section of the response (https://github.com/ansible-collections/community.general/issues/8612, https://github.com/ansible-collections/community.general/pull/8614).
-- one_host - fix if statements for cases when ``ID=0`` (https://github.com/ansible-collections/community.general/issues/1199, https://github.com/ansible-collections/community.general/pull/8907).
-- one_image - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056).
-- one_image_info - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056).
-- one_service - fix service creation after it was deleted with ``unique`` parameter (https://github.com/ansible-collections/community.general/issues/3137, https://github.com/ansible-collections/community.general/pull/8887).
-- one_vnet - fix module failing due to a variable typo (https://github.com/ansible-collections/community.general/pull/9019).
-- opennebula inventory plugin - fix invalid reference to IP when inventory runs against NICs with no IPv4 address (https://github.com/ansible-collections/community.general/pull/8489).
-- opentelemetry callback - do not save the JSON response when using the ``ansible.builtin.uri`` module (https://github.com/ansible-collections/community.general/pull/8430).
-- opentelemetry callback - do not save the content response when using the ``ansible.builtin.slurp`` module (https://github.com/ansible-collections/community.general/pull/8430).
-- pam_limits - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950, https://github.com/ansible-collections/community.general/pull/8925).
-- paman - do not fail if an empty list of packages has been provided and there is nothing to do (https://github.com/ansible-collections/community.general/pull/8514).
-- pipx - it was ignoring ``global`` when listing existing applications (https://github.com/ansible-collections/community.general/pull/9044).
-- pipx module utils - add missing command line formatter for argument ``spec_metadata`` (https://github.com/ansible-collections/community.general/pull/9044).
-- pipx_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- proxmox - fix idempotency on creation of mount volumes using Proxmox' special ``:`` syntax (https://github.com/ansible-collections/community.general/issues/8407, https://github.com/ansible-collections/community.general/pull/8542).
-- proxmox - fixed an issue where the new volume handling incorrectly converted ``null`` values into ``"None"`` strings (https://github.com/ansible-collections/community.general/pull/8646).
-- proxmox - fixed an issue where volume strings where overwritten instead of appended to in the new ``build_volume()`` method (https://github.com/ansible-collections/community.general/pull/8646).
-- proxmox - removed the forced conversion of non-string values to strings to be consistent with the module documentation (https://github.com/ansible-collections/community.general/pull/8646).
-- proxmox inventory plugin - fixed a possible error on concatenating responses from proxmox. In case an API call unexpectedly returned an empty result, the inventory failed with a fatal error. Added check for empty response (https://github.com/ansible-collections/community.general/issues/8798, https://github.com/ansible-collections/community.general/pull/8794).
-- python_runner module utils - parameter ``path_prefix`` was being handled as string when it should be a list (https://github.com/ansible-collections/community.general/pull/8944).
-- redfish_utils module utils - do not fail when language is not exactly "en" (https://github.com/ansible-collections/community.general/pull/8613).
-- redfish_utils module utils - fix issue with URI parsing to gracefully handling trailing slashes when extracting member identifiers (https://github.com/ansible-collections/community.general/issues/9047, https://github.com/ansible-collections/community.general/pull/9057).
-- snap - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- snap_alias - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
-- udm_user - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4690, https://github.com/ansible-collections/community.general/pull/8987).
-
-Known Issues
-------------
-
-- jenkins_node - the module is not able to update offline message when node is already offline due to internally using toggleOffline API (https://github.com/ansible-collections/community.general/pull/9084).
-
-New Plugins
------------
-
-Filter
-~~~~~~
-
-- community.general.keep_keys - Keep specific keys from dictionaries in a list.
-- community.general.remove_keys - Remove specific keys from dictionaries in a list.
-- community.general.replace_keys - Replace specific keys in a list of dictionaries.
-- community.general.reveal_ansible_type - Return input type.
-
-Test
-~~~~
-
-- community.general.ansible_type - Validate input type.
-
-New Modules
------------
-
-- community.general.bootc_manage - Bootc Switch and Upgrade.
-- community.general.consul_agent_check - Add, modify, and delete checks within a consul cluster.
-- community.general.consul_agent_service - Add, modify and delete services within a consul cluster.
-- community.general.django_check - Wrapper for C(django-admin check).
-- community.general.django_createcachetable - Wrapper for C(django-admin createcachetable).
-- community.general.homebrew_services - Services manager for Homebrew.
-- community.general.ipa_getkeytab - Manage keytab file in FreeIPA.
-- community.general.jenkins_node - Manage Jenkins nodes.
-- community.general.keycloak_component - Allows administration of Keycloak components via Keycloak API.
-- community.general.keycloak_realm_keys_metadata_info - Allows obtaining Keycloak realm keys metadata via Keycloak API.
-- community.general.keycloak_userprofile - Allows managing Keycloak User Profiles.
-- community.general.krb_ticket - Kerberos utils for managing tickets.
-- community.general.one_vnet - Manages OpenNebula virtual networks.
-- community.general.zypper_repository_info - List Zypper repositories.
+This file is a placeholder; a version-specific ``CHANGELOG-vX.rst`` will be generated during releases from fragments
+under ``changelogs/fragments``. On release branches once a release has been created, consult the branch's version-specific
+file for changes that have occurred in that branch.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 55a7098cc2..94c5299069 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -44,7 +44,49 @@ If you want to test a PR locally, refer to [our testing guide](https://github.co
If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it.
-## Run sanity, unit or integration tests locally
+## Run sanity or unit locally (with antsibull-nox)
+
+The easiest way to run sanity and unit tests locally is to use [antsibull-nox](https://ansible.readthedocs.io/projects/antsibull-nox/).
+(If you have [nox](https://nox.thea.codes/en/stable/) installed, it will automatically install antsibull-nox in a virtual environment for you.)
+
+### Sanity tests
+
+The following commands show how to run ansible-test sanity tests:
+
+```.bash
+# Run basic sanity tests for all files in the collection:
+nox -Re ansible-test-sanity-devel
+
+# Run basic sanity tests for the given files and directories:
+nox -Re ansible-test-sanity-devel -- plugins/modules/system/pids.py tests/integration/targets/pids/
+
+# Run all other sanity tests for all files in the collection:
+nox -R
+```
+
+If you replace `-Re` with `-e`, then the virtual environments will be re-created. The `-R` re-uses them (if they already exist).
+
+### Unit tests
+
+The following commands show how to run unit tests:
+
+```.bash
+# Run all unit tests:
+nox -Re ansible-test-units-devel
+
+# Run all unit tests for one Python version (a lot faster):
+nox -Re ansible-test-units-devel -- --python 3.13
+
+# Run a specific unit test (for the nmcli module) for one Python version:
+nox -Re ansible-test-units-devel -- --python 3.13 tests/unit/plugins/modules/net_tools/test_nmcli.py
+```
+
+If you replace `-Re` with `-e`, then the virtual environments will be re-created. The `-R` re-uses them (if they already exist).
+
+## Run basic sanity, unit or integration tests locally (with ansible-test)
+
+Instead of using antsibull-nox, you can also run sanity and unit tests with ansible-test directly.
+This also allows you to run integration tests.
You have to check out the repository into a specific path structure to be able to run `ansible-test`. The path to the git checkout must end with `.../ansible_collections/community/general`. Please see [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to check out the repository into a correct path structure. The short version of these instructions is:
@@ -56,20 +98,27 @@ cd ~/dev/ansible_collections/community/general
Then you can run `ansible-test` (which is a part of [ansible-core](https://pypi.org/project/ansible-core/)) inside the checkout. The following example commands expect that you have installed Docker or Podman. Note that Podman has only been supported by more recent ansible-core releases. If you are using Docker, the following will work with Ansible 2.9+.
-### Sanity tests
+### Basic sanity tests
-The following commands show how to run sanity tests:
+The following commands show how to run basic sanity tests:
```.bash
-# Run sanity tests for all files in the collection:
+# Run basic sanity tests for all files in the collection:
ansible-test sanity --docker -v
-# Run sanity tests for the given files and directories:
+# Run basic sanity tests for the given files and directories:
ansible-test sanity --docker -v plugins/modules/system/pids.py tests/integration/targets/pids/
```
### Unit tests
+Note that for running unit tests, you need to install required collections in the same folder structure that `community.general` is checked out in.
+Right now, you need to install [`community.internal_test_tools`](https://github.com/ansible-collections/community.internal_test_tools).
+If you want to use the latest version from GitHub, you can run:
+```
+git clone https://github.com/ansible-collections/community.internal_test_tools.git ~/dev/ansible_collections/community/internal_test_tools
+```
+
The following commands show how to run unit tests:
```.bash
@@ -85,6 +134,16 @@ ansible-test units --docker -v --python 3.8 tests/unit/plugins/modules/net_tools
### Integration tests
+Note that for running integration tests, you need to install required collections in the same folder structure that `community.general` is checked out in.
+Right now, depending on the test, you need to install [`ansible.posix`](https://github.com/ansible-collections/ansible.posix), [`community.crypto`](https://github.com/ansible-collections/community.crypto), and [`community.docker`](https://github.com/ansible-collections/community.docker).
+If you want to use the latest versions from GitHub, you can run:
+```
+mkdir -p ~/dev/ansible_collections/ansible
+git clone https://github.com/ansible-collections/ansible.posix.git ~/dev/ansible_collections/ansible/posix
+git clone https://github.com/ansible-collections/community.crypto.git ~/dev/ansible_collections/community/crypto
+git clone https://github.com/ansible-collections/community.docker.git ~/dev/ansible_collections/community/docker
+```
+
The following commands show how to run integration tests:
#### In Docker
@@ -92,8 +151,8 @@ The following commands show how to run integration tests:
Integration tests on Docker have the following parameters:
- `image_name` (required): The name of the Docker image. To get the list of supported Docker images, run
`ansible-test integration --help` and look for _target docker images_.
-- `test_name` (optional): The name of the integration test.
- For modules, this equals the short name of the module; for example, `pacman` in case of `community.general.pacman`.
+- `test_name` (optional): The name of the integration test.
+ For modules, this equals the short name of the module; for example, `pacman` in case of `community.general.pacman`.
For plugins, the plugin type is added before the plugin's short name, for example `callback_yaml` for the `community.general.yaml` callback.
```.bash
# Test all plugins/modules on fedora40
diff --git a/README.md b/README.md
index b4e51362ce..dbfc8c0f07 100644
--- a/README.md
+++ b/README.md
@@ -6,10 +6,10 @@ SPDX-License-Identifier: GPL-3.0-or-later
# Community General Collection
-[](https://docs.ansible.com/ansible/latest/collections/community/general/)
-[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
-[](https://github.com/ansible-collections/community.general/actions)
-[](https://github.com/ansible-collections/community.general/actions)
+[](https://docs.ansible.com/ansible/devel/collections/community/general/)
+[](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
+[](https://github.com/ansible-collections/community.general/actions)
+[](https://github.com/ansible-collections/community.general/actions)
[](https://codecov.io/gh/ansible-collections/community.general)
[](https://api.reuse.software/info/github.com/ansible-collections/community.general)
@@ -39,7 +39,7 @@ For more information about communication, see the [Ansible communication guide](
## Tested with Ansible
-Tested with the current ansible-core 2.15, ansible-core 2.16, ansible-core 2.17, ansible-core 2.18 releases and the current development version of ansible-core. Ansible-core versions before 2.15.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
+Tested with the current ansible-core 2.16, ansible-core 2.17, ansible-core 2.18, ansible-core 2.19 releases and the current development version of ansible-core. Ansible-core versions before 2.16.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
## External requirements
@@ -118,7 +118,7 @@ See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/ma
## Release notes
-See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-10/CHANGELOG.md).
+See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.md).
## Roadmap
@@ -137,8 +137,8 @@ See [this issue](https://github.com/ansible-collections/community.general/issues
This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later.
-See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/stable-10/COPYING) for the full text.
+See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/main/COPYING) for the full text.
-Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/stable-10/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/stable-10/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/stable-10/LICENSES/PSF-2.0.txt).
+Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/PSF-2.0.txt).
-All files have a machine readable `SDPX-License-Identifier:` comment denoting its respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `.reuse/dep5`. This conforms to the [REUSE specification](https://reuse.software/spec/).
+All files have a machine-readable `SPDX-License-Identifier:` comment denoting their respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `REUSE.toml`. This conforms to the [REUSE specification](https://reuse.software/spec/).
diff --git a/antsibull-nox.toml b/antsibull-nox.toml
index 8c1c501896..c631d3a3af 100644
--- a/antsibull-nox.toml
+++ b/antsibull-nox.toml
@@ -8,10 +8,39 @@
"community.docker" = "git+https://github.com/ansible-collections/community.docker.git,main"
"community.internal_test_tools" = "git+https://github.com/ansible-collections/community.internal_test_tools.git,main"
+[collection_sources_per_ansible.'2.16']
+# community.crypto's main branch needs ansible-core >= 2.17
+"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,stable-2"
+
[sessions]
+[sessions.lint]
+run_isort = false
+run_black = false
+run_flake8 = false
+run_pylint = false
+run_yamllint = true
+yamllint_config = ".yamllint"
+# yamllint_config_plugins = ".yamllint-docs"
+# yamllint_config_plugins_examples = ".yamllint-examples"
+run_mypy = false
+
[sessions.docs_check]
validate_collection_refs="all"
+codeblocks_restrict_types = [
+ "ansible-output",
+ "console",
+ "ini",
+ "json",
+ "python",
+ "shell",
+ "yaml",
+ "yaml+jinja",
+ "text",
+]
+codeblocks_restrict_type_exact_case = true
+codeblocks_allow_without_type = false
+codeblocks_allow_literal_blocks = false
[sessions.license_check]
@@ -20,6 +49,18 @@ run_no_unwanted_files = true
no_unwanted_files_module_extensions = [".py"]
no_unwanted_files_yaml_extensions = [".yml"]
run_action_groups = true
+run_no_trailing_whitespace = true
+no_trailing_whitespace_skip_paths = [
+ "tests/integration/targets/iso_extract/files/test.iso",
+ "tests/integration/targets/java_cert/files/testpkcs.p12",
+ "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz",
+ "tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz",
+ "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz",
+]
+no_trailing_whitespace_skip_directories = [
+ "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/",
+ "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/",
+]
[[sessions.extra_checks.action_groups_config]]
name = "consul"
@@ -38,11 +79,11 @@ exclusions = [
]
doc_fragment = "community.general.keycloak.actiongroup_keycloak"
-[[sessions.extra_checks.action_groups_config]]
-name = "proxmox"
-pattern = "^proxmox(_.*)?$"
-exclusions = []
-doc_fragment = "community.general.proxmox.actiongroup_proxmox"
-
[sessions.build_import_check]
run_galaxy_importer = true
+
+[sessions.ansible_test_sanity]
+include_devel = true
+
+[sessions.ansible_test_units]
+include_devel = true
diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml
index f41d55c26f..f8129d5d73 100644
--- a/changelogs/changelog.yaml
+++ b/changelogs/changelog.yaml
@@ -1,1803 +1,3 @@
---
-ancestor: 9.0.0
-releases:
- 10.0.0:
- changes:
- breaking_changes:
- - The collection no longer supports ansible-core 2.13 and ansible-core 2.14.
- While most (or even all) modules and plugins might still work with these
- versions, they are no longer tested in CI and breakages regarding them will
- not be fixed (https://github.com/ansible-collections/community.general/pull/8921).
- - cmd_runner module utils - CLI arguments created directly from module parameters
- are no longer assigned a default formatter (https://github.com/ansible-collections/community.general/pull/8928).
- - irc - the defaults of ``use_tls`` and ``validate_certs`` changed from ``false``
- to ``true`` (https://github.com/ansible-collections/community.general/pull/8918).
- - rhsm_repository - the states ``present`` and ``absent`` have been removed.
- Use ``enabled`` and ``disabled`` instead (https://github.com/ansible-collections/community.general/pull/8918).
- bugfixes:
- - bitwarden lookup plugin - fix ``KeyError`` in ``search_field`` (https://github.com/ansible-collections/community.general/issues/8549,
- https://github.com/ansible-collections/community.general/pull/8557).
- - bitwarden lookup plugin - support BWS v0.3.0 syntax breaking change (https://github.com/ansible-collections/community.general/pull/9028).
- - cloudflare_dns - fix changing Cloudflare SRV records (https://github.com/ansible-collections/community.general/issues/8679,
- https://github.com/ansible-collections/community.general/pull/8948).
- - cmd_runner module utils - call to ``get_best_parsable_locales()`` was missing
- parameter (https://github.com/ansible-collections/community.general/pull/8929).
- - collection_version lookup plugin - use ``importlib`` directly instead of
- the deprecated and in ansible-core 2.19 removed ``ansible.module_utils.compat.importlib``
- (https://github.com/ansible-collections/community.general/pull/9084).
- - cpanm - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - dig lookup plugin - fix using only the last nameserver specified (https://github.com/ansible-collections/community.general/pull/8970).
- - django module utils - use new ``VarDict`` to prevent deprecation warning
- (https://github.com/ansible-collections/community.general/issues/8410, https://github.com/ansible-collections/community.general/pull/8411).
- - django_command - option ``command`` is now split lexically before passed
- to underlying PythonRunner (https://github.com/ansible-collections/community.general/pull/8944).
- - gconftool2_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - git_config - fix behavior of ``state=absent`` if ``value`` is present (https://github.com/ansible-collections/community.general/issues/8436,
- https://github.com/ansible-collections/community.general/pull/8452).
- - gitlab_group_access_token - fix crash in check mode caused by attempted
- access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796).
- - gitlab_label - update label's color (https://github.com/ansible-collections/community.general/pull/9010).
- - gitlab_project - fix ``container_expiration_policy`` not being applied when
- creating a new project (https://github.com/ansible-collections/community.general/pull/8790).
- - gitlab_project - fix crash caused by old Gitlab projects not having a ``container_expiration_policy``
- attribute (https://github.com/ansible-collections/community.general/pull/8790).
- - gitlab_project_access_token - fix crash in check mode caused by attempted
- access to a newly created access token (https://github.com/ansible-collections/community.general/pull/8796).
- - gitlab_runner - fix ``paused`` parameter being ignored (https://github.com/ansible-collections/community.general/pull/8648).
- - homebrew - do not fail when brew prints warnings (https://github.com/ansible-collections/community.general/pull/8406,
- https://github.com/ansible-collections/community.general/issues/7044).
- - homebrew_cask - fix ``upgrade_all`` returns ``changed`` when nothing upgraded
- (https://github.com/ansible-collections/community.general/issues/8707, https://github.com/ansible-collections/community.general/pull/8708).
- - homectl - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4691,
- https://github.com/ansible-collections/community.general/pull/8987).
- - hponcfg - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - ini_file - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950,
- https://github.com/ansible-collections/community.general/pull/8925).
- - ipa_host - add ``force_create``, fix ``enabled`` and ``disabled`` states
- (https://github.com/ansible-collections/community.general/issues/1094, https://github.com/ansible-collections/community.general/pull/8920).
- - ipa_hostgroup - fix ``enabled `` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/8408,
- https://github.com/ansible-collections/community.general/pull/8900).
- - java_keystore - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950,
- https://github.com/ansible-collections/community.general/pull/8925).
- - jenkins_node - fixed ``enabled``, ``disable`` and ``absent`` node state
- redirect authorization issues, same as was present for ``present`` (https://github.com/ansible-collections/community.general/pull/9084).
- - jenkins_plugin - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950,
- https://github.com/ansible-collections/community.general/pull/8925).
- - kdeconfig - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950,
- https://github.com/ansible-collections/community.general/pull/8925).
- - kernel_blacklist - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - keycloak_client - fix TypeError when sanitizing the ``saml.signing.private.key``
- attribute in the module's diff or state output. The ``sanitize_cr`` function
- expected a dict where in some cases a list might occur (https://github.com/ansible-collections/community.general/pull/8403).
- - keycloak_clientscope - remove IDs from clientscope and its protocol mappers
- on comparison for changed check (https://github.com/ansible-collections/community.general/pull/8545).
- - keycloak_clientscope_type - fix detect changes in check mode (https://github.com/ansible-collections/community.general/issues/9092,
- https://github.com/ansible-collections/community.general/pull/9093).
- - "keycloak_group - fix crash caused in subgroup creation. The crash was caused\
- \ by a missing or empty ``subGroups`` property in Keycloak \u226523 (https://github.com/ansible-collections/community.general/issues/8788,\
- \ https://github.com/ansible-collections/community.general/pull/8979)."
- - keycloak_realm - add normalizations for ``attributes`` and ``protocol_mappers``
- (https://github.com/ansible-collections/community.general/pull/8496).
- - keycloak_realm - fix change detection in check mode by sorting the lists
- in the realms beforehand (https://github.com/ansible-collections/community.general/pull/8877).
- - keycloak_realm_key - fix invalid usage of ``parent_id`` (https://github.com/ansible-collections/community.general/issues/7850,
- https://github.com/ansible-collections/community.general/pull/8823).
- - keycloak_user_federation - add module argument allowing users to configure
- the update mode for the parameter ``bindCredential`` (https://github.com/ansible-collections/community.general/pull/8898).
- - keycloak_user_federation - fix key error when removing mappers during an
- update and new mappers are specified in the module args (https://github.com/ansible-collections/community.general/pull/8762).
- - keycloak_user_federation - fix the ``UnboundLocalError`` that occurs when
- an ID is provided for a user federation mapper (https://github.com/ansible-collections/community.general/pull/8831).
- - keycloak_user_federation - get cleartext IDP ``clientSecret`` from full
- realm info to detect changes to it (https://github.com/ansible-collections/community.general/issues/8294,
- https://github.com/ansible-collections/community.general/pull/8735).
- - keycloak_user_federation - minimize change detection by setting ``krbPrincipalAttribute``
- to ``''`` in Keycloak responses if missing (https://github.com/ansible-collections/community.general/pull/8785).
- - keycloak_user_federation - remove ``lastSync`` parameter from Keycloak responses
- to minimize diff/changes (https://github.com/ansible-collections/community.general/pull/8812).
- - keycloak_user_federation - remove existing user federation mappers if they
- are not present in the federation configuration and will not be updated
- (https://github.com/ansible-collections/community.general/issues/7169, https://github.com/ansible-collections/community.general/pull/8695).
- - keycloak_user_federation - sort desired and after mapper list by name (analog
- to before mapper list) to minimize diff and make change detection more accurate
- (https://github.com/ansible-collections/community.general/pull/8761).
- - keycloak_userprofile - fix empty response when fetching userprofile component
- by removing ``parent=parent_id`` filter (https://github.com/ansible-collections/community.general/pull/8923).
- - keycloak_userprofile - improve diff by deserializing the fetched ``kc.user.profile.config``
- and serialize it only when sending back (https://github.com/ansible-collections/community.general/pull/8940).
- - launched - correctly report changed status in check mode (https://github.com/ansible-collections/community.general/pull/8406).
- - locale_gen - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - lxd_container - fix bug introduced in previous commit (https://github.com/ansible-collections/community.general/pull/8895,
- https://github.com/ansible-collections/community.general/issues/8888).
- - mksysb - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - modprobe - fix check mode not being honored for ``persistent`` option (https://github.com/ansible-collections/community.general/issues/9051,
- https://github.com/ansible-collections/community.general/pull/9052).
- - nsupdate - fix 'index out of range' error when changing NS records by falling
- back to authority section of the response (https://github.com/ansible-collections/community.general/issues/8612,
- https://github.com/ansible-collections/community.general/pull/8614).
- - one_host - fix if statements for cases when ``ID=0`` (https://github.com/ansible-collections/community.general/issues/1199,
- https://github.com/ansible-collections/community.general/pull/8907).
- - one_image - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056).
- - one_image_info - fix module failing due to a class method typo (https://github.com/ansible-collections/community.general/pull/9056).
- - one_service - fix service creation after it was deleted with ``unique``
- parameter (https://github.com/ansible-collections/community.general/issues/3137,
- https://github.com/ansible-collections/community.general/pull/8887).
- - one_vnet - fix module failing due to a variable typo (https://github.com/ansible-collections/community.general/pull/9019).
- - opennebula inventory plugin - fix invalid reference to IP when inventory
- runs against NICs with no IPv4 address (https://github.com/ansible-collections/community.general/pull/8489).
- - opentelemetry callback - do not save the JSON response when using the ``ansible.builtin.uri``
- module (https://github.com/ansible-collections/community.general/pull/8430).
- - opentelemetry callback - do not save the content response when using the
- ``ansible.builtin.slurp`` module (https://github.com/ansible-collections/community.general/pull/8430).
- - pam_limits - pass absolute paths to ``module.atomic_move()`` (https://github.com/ansible/ansible/issues/83950,
- https://github.com/ansible-collections/community.general/pull/8925).
- - paman - do not fail if an empty list of packages has been provided and there
- is nothing to do (https://github.com/ansible-collections/community.general/pull/8514).
- - pipx - it was ignoring ``global`` when listing existing applications (https://github.com/ansible-collections/community.general/pull/9044).
- - pipx module utils - add missing command line formatter for argument ``spec_metadata``
- (https://github.com/ansible-collections/community.general/pull/9044).
- - pipx_info - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - proxmox - fix idempotency on creation of mount volumes using Proxmox' special
- ``:`` syntax (https://github.com/ansible-collections/community.general/issues/8407,
- https://github.com/ansible-collections/community.general/pull/8542).
- - proxmox - fixed an issue where the new volume handling incorrectly converted
- ``null`` values into ``"None"`` strings (https://github.com/ansible-collections/community.general/pull/8646).
- - proxmox - fixed an issue where volume strings where overwritten instead
- of appended to in the new ``build_volume()`` method (https://github.com/ansible-collections/community.general/pull/8646).
- - proxmox - removed the forced conversion of non-string values to strings
- to be consistent with the module documentation (https://github.com/ansible-collections/community.general/pull/8646).
- - proxmox inventory plugin - fixed a possible error on concatenating responses
- from proxmox. In case an API call unexpectedly returned an empty result,
- the inventory failed with a fatal error. Added check for empty response
- (https://github.com/ansible-collections/community.general/issues/8798, https://github.com/ansible-collections/community.general/pull/8794).
- - python_runner module utils - parameter ``path_prefix`` was being handled
- as string when it should be a list (https://github.com/ansible-collections/community.general/pull/8944).
- - redfish_utils module utils - do not fail when language is not exactly "en"
- (https://github.com/ansible-collections/community.general/pull/8613).
- - redfish_utils module utils - fix issue with URI parsing to gracefully handling
- trailing slashes when extracting member identifiers (https://github.com/ansible-collections/community.general/issues/9047,
- https://github.com/ansible-collections/community.general/pull/9057).
- - snap - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - snap_alias - use new ``VarDict`` to prevent deprecation warning (https://github.com/ansible-collections/community.general/issues/8410,
- https://github.com/ansible-collections/community.general/pull/8411).
- - udm_user - the module now tries to use ``legacycrypt`` on Python 3.13+ (https://github.com/ansible-collections/community.general/issues/4690,
- https://github.com/ansible-collections/community.general/pull/8987).
- deprecated_features:
- - CmdRunner module util - setting the value of the ``ignore_none`` parameter
- within a ``CmdRunner`` context is deprecated and that feature should be
- removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/8479).
- - MH decorator cause_changes module utils - deprecate parameters ``on_success``
- and ``on_failure`` (https://github.com/ansible-collections/community.general/pull/8791).
- - git_config - the ``list_all`` option has been deprecated and will be removed
- in community.general 11.0.0. Use the ``community.general.git_config_info``
- module instead (https://github.com/ansible-collections/community.general/pull/8453).
- - git_config - using ``state=present`` without providing ``value`` is deprecated
- and will be disallowed in community.general 11.0.0. Use the ``community.general.git_config_info``
- module instead to read a value (https://github.com/ansible-collections/community.general/pull/8453).
- - hipchat - the hipchat service has been discontinued and the self-hosted
- variant has been End of Life since 2020. The module is therefore deprecated
- and will be removed from community.general 11.0.0 if nobody provides compelling
- reasons to still keep it (https://github.com/ansible-collections/community.general/pull/8919).
- - 'pipx - support for versions of the command line tool ``pipx`` older than
- ``1.7.0`` is deprecated and will be removed in community.general 11.0.0
- (https://github.com/ansible-collections/community.general/pull/8793).
-
- '
- - 'pipx_info - support for versions of the command line tool ``pipx`` older
- than ``1.7.0`` is deprecated and will be removed in community.general 11.0.0
- (https://github.com/ansible-collections/community.general/pull/8793).
-
- '
- known_issues:
- - jenkins_node - the module is not able to update offline message when node
- is already offline due to internally using toggleOffline API (https://github.com/ansible-collections/community.general/pull/9084).
- minor_changes:
- - CmdRunner module util - argument formats can be specified as plain functions
- without calling ``cmd_runner_fmt.as_func()`` (https://github.com/ansible-collections/community.general/pull/8479).
- - CmdRunner module utils - the parameter ``force_lang`` now supports the special
- value ``auto`` which will automatically try and determine the best parsable
- locale in the system (https://github.com/ansible-collections/community.general/pull/8517).
- - MH module utils - add parameter ``when`` to ``cause_changes`` decorator
- (https://github.com/ansible-collections/community.general/pull/8766).
- - MH module utils - minor refactor in decorators (https://github.com/ansible-collections/community.general/pull/8766).
- - alternatives - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - ansible_galaxy_install - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9060).
- - ansible_galaxy_install - add upgrade feature (https://github.com/ansible-collections/community.general/pull/8431,
- https://github.com/ansible-collections/community.general/issues/8351).
- - ansible_galaxy_install - minor refactor in the module (https://github.com/ansible-collections/community.general/pull/8413).
- - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - apache2_mod_proxy - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - cargo - add option ``directory``, which allows source directory to be specified
- (https://github.com/ansible-collections/community.general/pull/8480).
- - cgroup_memory_recap, hipchat, jabber, log_plays, loganalytics, logentries,
- logstash, slack, splunk, sumologic, syslog_json callback plugins - make
- sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8628).
- - chef_databag, consul_kv, cyberarkpassword, dsv, etcd, filetree, hiera, onepassword,
- onepassword_doc, onepassword_raw, passwordstore, redis, shelvefile, tss
- lookup plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8626).
- - chroot, funcd, incus, iocage, jail, lxc, lxd, qubes, zone connection plugins
- - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8627).
- - cmd_runner module utils - add decorator ``cmd_runner_fmt.stack`` (https://github.com/ansible-collections/community.general/pull/8415).
- - cmd_runner module utils - refactor argument formatting code to its own Python
- module (https://github.com/ansible-collections/community.general/pull/8964).
- - cmd_runner_fmt module utils - simplify implementation of ``cmd_runner_fmt.as_bool_not()``
- (https://github.com/ansible-collections/community.general/pull/8512).
- - cobbler, linode, lxd, nmap, online, scaleway, stackpath_compute, virtualbox
- inventory plugins - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8625).
- - consul_acl - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - consul_kv - add argument for the datacenter option on Consul API (https://github.com/ansible-collections/community.general/pull/9026).
- - copr - Added ``includepkgs`` and ``excludepkgs`` parameters to limit the
- list of packages fetched or excluded from the repository(https://github.com/ansible-collections/community.general/pull/8779).
- - cpanm - add return value ``cpanm_version`` (https://github.com/ansible-collections/community.general/pull/9061).
- - credstash lookup plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - csv module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - deco MH module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - dig lookup plugin - add ``port`` option to specify DNS server port (https://github.com/ansible-collections/community.general/pull/8966).
- - django module utils - always retrieve version (https://github.com/ansible-collections/community.general/pull/9063).
- - django_check - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063).
- - django_command - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063).
- - django_createcachetable - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9063).
- - doas, dzdo, ksu, machinectl, pbrun, pfexec, pmrun, sesu, sudosu become plugins
- - make sure that all options are typed (https://github.com/ansible-collections/community.general/pull/8623).
- - etcd3 - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - flatpak - improve the parsing of Flatpak application IDs based on official
- guidelines (https://github.com/ansible-collections/community.general/pull/8909).
- - gconftool2 - make use of ``ModuleHelper`` features to simplify code (https://github.com/ansible-collections/community.general/pull/8711).
- - gcontool2 - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064).
- - gcontool2 module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9064).
- - gcontool2_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9064).
- - gio_mime - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9067).
- - gio_mime - adjust code ahead of the old ``VardDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8855).
- - gio_mime - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776).
- - gio_mime module utils - add argument formatter ``version`` (https://github.com/ansible-collections/community.general/pull/9067).
- - github_app_access_token lookup plugin - adds new ``private_key`` parameter
- (https://github.com/ansible-collections/community.general/pull/8989).
- - gitlab_deploy_key - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - gitlab_group - add many new parameters (https://github.com/ansible-collections/community.general/pull/8908).
- - gitlab_group - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - gitlab_group - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - gitlab_issue - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - gitlab_merge_request - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - gitlab_project - add option ``container_expiration_policy`` to schedule
- container registry cleanup (https://github.com/ansible-collections/community.general/pull/8674).
- - gitlab_project - add option ``issues_access_level`` to enable/disable project
- issues (https://github.com/ansible-collections/community.general/pull/8760).
- - gitlab_project - add option ``model_registry_access_level`` to disable model
- registry (https://github.com/ansible-collections/community.general/pull/8688).
- - gitlab_project - add option ``pages_access_level`` to disable project pages
- (https://github.com/ansible-collections/community.general/pull/8688).
- - gitlab_project - add option ``repository_access_level`` to disable project
- repository (https://github.com/ansible-collections/community.general/pull/8674).
- - gitlab_project - add option ``service_desk_enabled`` to disable service
- desk (https://github.com/ansible-collections/community.general/pull/8688).
- - gitlab_project - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - gitlab_project - sorted parameters in order to avoid future merge conflicts
- (https://github.com/ansible-collections/community.general/pull/8759).
- - gitlab_runner - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - hashids filter plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - homebrew - speed up brew install and upgrade (https://github.com/ansible-collections/community.general/pull/9022).
- - hwc_ecs_instance - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - hwc_evs_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - hwc_vpc_eip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - hwc_vpc_peering_connect - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - hwc_vpc_port - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - hwc_vpc_subnet - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - icinga2_host - replace loop with dict comprehension (https://github.com/ansible-collections/community.general/pull/8876).
- - imc_rest - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - ipa_dnsrecord - adds ``SSHFP`` record type for managing SSH fingerprints
- in FreeIPA DNS (https://github.com/ansible-collections/community.general/pull/8404).
- - ipa_otptoken - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - jenkins_node - add ``offline_message`` parameter for updating a Jenkins
- node offline cause reason when the state is "disabled" (offline) (https://github.com/ansible-collections/community.general/pull/9084)."
- - jira - adjust code ahead of the old ``VardDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8856).
- - jira - mute the old ``VarDict`` deprecation (https://github.com/ansible-collections/community.general/pull/8776).
- - jira - replace deprecated params when using decorator ``cause_changes``
- (https://github.com/ansible-collections/community.general/pull/8791).
- - keep_keys filter plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - keycloak module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - keycloak_client - add ``client-x509`` choice to ``client_authenticator_type``
- (https://github.com/ansible-collections/community.general/pull/8973).
- - keycloak_client - assign auth flow by name (https://github.com/ansible-collections/community.general/pull/8428).
- - keycloak_client - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - keycloak_clientscope - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - keycloak_identity_provider - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - keycloak_realm - add boolean toggle to configure organization support for
- a given keycloak realm (https://github.com/ansible-collections/community.general/issues/9027,
- https://github.com/ansible-collections/community.general/pull/8927/).
- - keycloak_user_federation - add module argument allowing users to optout
- of the removal of unspecified mappers, for example to keep the keycloak
- default mappers (https://github.com/ansible-collections/community.general/pull/8764).
- - keycloak_user_federation - add the user federation config parameter ``referral``
- to the module arguments (https://github.com/ansible-collections/community.general/pull/8954).
- - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - keycloak_user_federation - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - linode - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - locale_gen - add support for multiple locales (https://github.com/ansible-collections/community.general/issues/8677,
- https://github.com/ansible-collections/community.general/pull/8682).
- - lxc_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - lxd_container - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - manageiq_provider - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - mattermost - adds support for message priority (https://github.com/ansible-collections/community.general/issues/9068,
- https://github.com/ansible-collections/community.general/pull/9087).
- - memcached, pickle, redis, yaml cache plugins - make sure that all options
- are typed (https://github.com/ansible-collections/community.general/pull/8624).
- - memset_dns_reload - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - memset_memstore_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - memset_server_info - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - memset_zone - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - memset_zone_domain - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - memset_zone_record - replace loop with ``dict()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - nmcli - add ``conn_enable`` param to reload connection (https://github.com/ansible-collections/community.general/issues/3752,
- https://github.com/ansible-collections/community.general/issues/8704, https://github.com/ansible-collections/community.general/pull/8897).
- - nmcli - add ``state=up`` and ``state=down`` to enable/disable connections
- (https://github.com/ansible-collections/community.general/issues/3752, https://github.com/ansible-collections/community.general/issues/8704,
- https://github.com/ansible-collections/community.general/issues/7152, https://github.com/ansible-collections/community.general/pull/8897).
- - nmcli - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - npm - add ``force`` parameter to allow ``--force`` (https://github.com/ansible-collections/community.general/pull/8885).
- - ocapi_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - one_image - add ``create``, ``template`` and ``datastore_id`` arguments
- for image creation (https://github.com/ansible-collections/community.general/pull/9075).
- - one_image - add ``wait_timeout`` argument for adjustable timeouts (https://github.com/ansible-collections/community.general/pull/9075).
- - one_image - add option ``persistent`` to manage image persistence (https://github.com/ansible-collections/community.general/issues/3578,
- https://github.com/ansible-collections/community.general/pull/8889).
- - one_image - extend xsd scheme to make it return a lot more info about image
- (https://github.com/ansible-collections/community.general/pull/8889).
- - one_image - refactor code to make it more similar to ``one_template`` and
- ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889).
- - one_image_info - extend xsd scheme to make it return a lot more info about
- image (https://github.com/ansible-collections/community.general/pull/8889).
- - one_image_info - refactor code to make it more similar to ``one_template``
- and ``one_vnet`` (https://github.com/ansible-collections/community.general/pull/8889).
- - one_service - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - one_vm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - onepassword lookup plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - open_iscsi - allow login to a portal with multiple targets without specifying
- any of them (https://github.com/ansible-collections/community.general/pull/8719).
- - openbsd_pkg - adds diff support to show changes in installed package list.
- This does not yet work for check mode (https://github.com/ansible-collections/community.general/pull/8402).
- - opennebula.py - add VM ``id`` and VM ``host`` to inventory host data (https://github.com/ansible-collections/community.general/pull/8532).
- - opentelemetry callback plugin - fix default value for ``store_spans_in_file``
- causing traces to be produced to a file named ``None`` (https://github.com/ansible-collections/community.general/issues/8566,
- https://github.com/ansible-collections/community.general/pull/8741).
- - opkg - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9086).
- - passwordstore lookup plugin - add subkey creation/update support (https://github.com/ansible-collections/community.general/pull/8952).
- - passwordstore lookup plugin - add the current user to the lockfile file
- name to address issues on multi-user systems (https://github.com/ansible-collections/community.general/pull/8689).
- - pids - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - pipx - add parameter ``suffix`` to module (https://github.com/ansible-collections/community.general/pull/8675,
- https://github.com/ansible-collections/community.general/issues/8656).
- - pipx - added new states ``install_all``, ``uninject``, ``upgrade_shared``,
- ``pin``, and ``unpin`` (https://github.com/ansible-collections/community.general/pull/8809).
- - pipx - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793).
- - pipx - refactor out parsing of ``pipx list`` output to module utils (https://github.com/ansible-collections/community.general/pull/9044).
- - pipx - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - pipx_info - add new return value ``pinned`` (https://github.com/ansible-collections/community.general/pull/9044).
- - pipx_info - added parameter ``global`` to module (https://github.com/ansible-collections/community.general/pull/8793).
- - pipx_info - refactor out parsing of ``pipx list`` output to module utils
- (https://github.com/ansible-collections/community.general/pull/9044).
- - pipx_info - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - pkg5_publisher - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - pkgng - add option ``use_globs`` (default ``true``) to optionally disable
- glob patterns (https://github.com/ansible-collections/community.general/issues/8632,
- https://github.com/ansible-collections/community.general/pull/8633).
- - proxmox - add ``disk_volume`` and ``mount_volumes`` keys for better readability
- (https://github.com/ansible-collections/community.general/pull/8542).
- - proxmox - allow specification of the API port when using proxmox_* (https://github.com/ansible-collections/community.general/issues/8440,
- https://github.com/ansible-collections/community.general/pull/8441).
- - proxmox - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - proxmox - translate the old ``disk`` and ``mounts`` keys to the new handling
- internally (https://github.com/ansible-collections/community.general/pull/8542).
- - proxmox inventory plugin - add new fact for LXC interface details (https://github.com/ansible-collections/community.general/pull/8713).
- - proxmox inventory plugin - clean up authentication code (https://github.com/ansible-collections/community.general/pull/8917).
- - proxmox inventory plugin - fix urllib3 ``InsecureRequestWarnings`` not being
- suppressed when a token is used (https://github.com/ansible-collections/community.general/pull/9099).
- - proxmox_disk - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - proxmox_kvm - adds the ``ciupgrade`` parameter to specify whether cloud-init
- should upgrade system packages at first boot (https://github.com/ansible-collections/community.general/pull/9066).
- - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8814).
- - proxmox_kvm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - proxmox_template - small refactor in logic for determining whether a template
- exists or not (https://github.com/ansible-collections/community.general/pull/8516).
- - proxmox_vm_info - add ``network`` option to retrieve current network information
- (https://github.com/ansible-collections/community.general/pull/8471).
- - redfish_* modules - adds ``ciphers`` option for custom cipher selection
- (https://github.com/ansible-collections/community.general/pull/8533).
- - redfish_command - add ``UpdateUserAccountTypes`` command (https://github.com/ansible-collections/community.general/issues/9058,
- https://github.com/ansible-collections/community.general/pull/9059).
- - redfish_command - add ``wait`` and ``wait_timeout`` options to allow a user
- to block a command until a service is accessible after performing the requested
- command (https://github.com/ansible-collections/community.general/issues/8051,
- https://github.com/ansible-collections/community.general/pull/8434).
- - redfish_command - add handling of the ``PasswordChangeRequired`` message
- from services in the ``UpdateUserPassword`` command to directly modify the
- user's password if the requested user is the one invoking the operation
- (https://github.com/ansible-collections/community.general/issues/8652, https://github.com/ansible-collections/community.general/pull/8653).
- - redfish_confg - remove ``CapacityBytes`` from required paramaters of the
- ``CreateVolume`` command (https://github.com/ansible-collections/community.general/pull/8956).
- - redfish_config - add parameter ``storage_none_volume_deletion`` to ``CreateVolume``
- command in order to control the automatic deletion of non-RAID volumes (https://github.com/ansible-collections/community.general/pull/8990).
- - redfish_info - add command ``CheckAvailability`` to check if a service is
- accessible (https://github.com/ansible-collections/community.general/issues/8051,
- https://github.com/ansible-collections/community.general/pull/8434).
- - redfish_info - adds ``RedfishURI`` and ``StorageId`` to Disk inventory (https://github.com/ansible-collections/community.general/pull/8937).
- - redfish_utils - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - redfish_utils module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - redfish_utils module utils - schedule a BIOS configuration job at next reboot
- when the BIOS config is changed (https://github.com/ansible-collections/community.general/pull/9012).
- - redis cache plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - redis, redis_info - add ``client_cert`` and ``client_key`` options to specify
- path to certificate for Redis authentication (https://github.com/ansible-collections/community.general/pull/8654).
- - redis_info - adds support for getting cluster info (https://github.com/ansible-collections/community.general/pull/8464).
- - remove_keys filter plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - replace_keys filter plugin - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - scaleway - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - scaleway module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - scaleway_compute - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - scaleway_container - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_container_info - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_container_namespace - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_container_namespace_info - replace Python 2.6 construct with dict
- comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_container_registry - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_container_registry_info - replace Python 2.6 construct with dict
- comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_function - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_function_info - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_function_namespace - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_function_namespace_info - replace Python 2.6 construct with dict
- comprehensions (https://github.com/ansible-collections/community.general/pull/8858).
- - scaleway_ip - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - scaleway_lb - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - scaleway_security_group - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8822).
- - scaleway_security_group - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - scaleway_user_data - better construct when using ``dict.items()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - scaleway_user_data - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8833).
- - sensu_silence - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - snmp_facts - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - sorcery - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8833).
- - sudosu become plugin - added an option (``alt_method``) to enhance compatibility
- with more versions of ``su`` (https://github.com/ansible-collections/community.general/pull/8214).
- - udm_dns_record - replace loop with ``dict.update()`` (https://github.com/ansible-collections/community.general/pull/8876).
- - ufw - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - unsafe plugin utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - vardict module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - vars MH module utils - replace Python 2.6 construct with dict comprehensions
- (https://github.com/ansible-collections/community.general/pull/8814).
- - virtualbox inventory plugin - expose a new parameter ``enable_advanced_group_parsing``
- to change how the VirtualBox dynamic inventory parses VM groups (https://github.com/ansible-collections/community.general/issues/8508,
- https://github.com/ansible-collections/community.general/pull/8510).
- - vmadm - replace Python 2.6 construct with dict comprehensions (https://github.com/ansible-collections/community.general/pull/8822).
- - wdc_redfish_command - minor change to handle upgrade file for Redfish WD
- platforms (https://github.com/ansible-collections/community.general/pull/8444).
- release_summary: This is release 10.0.0 of ``community.general``, released on
- 2024-11-04.
- removed_features:
- - The consul_acl module has been removed. Use community.general.consul_token
- and/or community.general.consul_policy instead (https://github.com/ansible-collections/community.general/pull/8921).
- - The hipchat callback plugin has been removed. The hipchat service has been
- discontinued and the self-hosted variant has been End of Life since 2020
- (https://github.com/ansible-collections/community.general/pull/8921).
- - The redhat module utils has been removed (https://github.com/ansible-collections/community.general/pull/8921).
- - The rhn_channel module has been removed (https://github.com/ansible-collections/community.general/pull/8921).
- - The rhn_register module has been removed (https://github.com/ansible-collections/community.general/pull/8921).
- - consul - removed the ``ack_params_state_absent`` option. It had no effect
- anymore (https://github.com/ansible-collections/community.general/pull/8918).
- - ejabberd_user - removed the ``logging`` option (https://github.com/ansible-collections/community.general/pull/8918).
- - gitlab modules - remove basic auth feature (https://github.com/ansible-collections/community.general/pull/8405).
- - proxmox_kvm - removed the ``proxmox_default_behavior`` option. Explicitly
- specify the old default values if you were using ``proxmox_default_behavior=compatibility``,
- otherwise simply remove it (https://github.com/ansible-collections/community.general/pull/8918).
- - redhat_subscriptions - removed the ``pool`` option. Use ``pool_ids`` instead
- (https://github.com/ansible-collections/community.general/pull/8918).
- fragments:
- - 10.0.0.yml
- - 8051-Redfish-Wait-For-Service.yml
- - 8214-sudosu-not-working-on-some-BSD-machines.yml
- - 8402-add-diif-mode-openbsd-pkg.yml
- - 8403-fix-typeerror-in-keycloak-client.yaml
- - 8404-ipa_dnsrecord_sshfp.yml
- - 8405-gitlab-remove-basic-auth.yml
- - 8406-fix-homebrew-cask-warning.yaml
- - 8411-locale-gen-vardict.yml
- - 8413-galaxy-refactor.yml
- - 8415-cmd-runner-stack.yml
- - 8428-assign-auth-flow-by-name-keycloak-client.yaml
- - 8430-fix-opentelemetry-when-using-logs-with-uri-or-slurp-tasks.yaml
- - 8431-galaxy-upgrade.yml
- - 8440-allow-api-port-specification.yaml
- - 8444-fix-redfish-gen2-upgrade.yaml
- - 8452-git_config-absent.yml
- - 8453-git_config-deprecate-read.yml
- - 8464-redis-add-cluster-info.yml
- - 8471-proxmox-vm-info-network.yml
- - 8476-launchd-check-mode-changed.yaml
- - 8479-cmdrunner-improvements.yml
- - 8480-directory-feature-cargo.yml
- - 8489-fix-opennebula-inventory-crash-when-nic-has-no-ip.yml
- - 8496-keycloak_clientscope-add-normalizations.yaml
- - 8508-virtualbox-inventory.yml
- - 8512-as-bool-not.yml
- - 8514-pacman-empty.yml
- - 8516-proxmox-template-refactor.yml
- - 8517-cmd-runner-lang-auto.yml
- - 8532-expand-opennuebula-inventory-data.yml
- - 8533-add-ciphers-option.yml
- - 8542-fix-proxmox-volume-handling.yml
- - 8545-keycloak-clientscope-remove-id-on-compare.yml
- - 8557-fix-bug-with-bitwarden.yml
- - 8613-redfish_utils-language.yaml
- - 8614-nsupdate-index-out-of-range.yml
- - 8623-become-types.yml
- - 8624-cache-types.yml
- - 8625-inventory-types.yml
- - 8626-lookup-types.yml
- - 8627-connection-types.yml
- - 8628-callback-types.yml
- - 8632-pkgng-add-option-use_globs.yml
- - 8646-fix-bug-in-proxmox-volumes.yml
- - 8648-fix-gitlab-runner-paused.yaml
- - 8652-Redfish-Password-Change-Required.yml
- - 8654-add-redis-tls-params.yml
- - 8674-add-gitlab-project-cleanup-policy.yml
- - 8675-pipx-install-suffix.yml
- - 8679-fix-cloudflare-srv.yml
- - 8682-locale-gen-multiple.yaml
- - 8688-gitlab_project-add-new-params.yml
- - 8689-passwordstore-lock-naming.yml
- - 8695-keycloak_user_federation-mapper-removal.yml
- - 8708-homebrew_cask-fix-upgrade-all.yml
- - 8711-gconftool2-refactor.yml
- - 8713-proxmox_lxc_interfaces.yml
- - 8719-openiscsi-add-multiple-targets.yaml
- - 8735-keycloak_identity_provider-get-cleartext-secret-from-realm-info.yml
- - 8738-limit-packages-for-copr.yml
- - 8741-fix-opentelemetry-callback.yml
- - 8759-gitlab_project-sort-params.yml
- - 8760-gitlab_project-add-issues-access-level.yml
- - 8761-keycloak_user_federation-sort-desired-and-after-mappers-by-name.yml
- - 8762-keycloac_user_federation-fix-key-error-when-updating.yml
- - 8764-keycloak_user_federation-make-mapper-removal-optout.yml
- - 8766-mh-deco-improve.yml
- - 8776-mute-vardict-deprecation.yml
- - 8785-keycloak_user_federation-set-krbPrincipalAttribute-to-empty-string-if-missing.yaml
- - 8790-gitlab_project-fix-cleanup-policy-on-project-create.yml
- - 8791-mh-cause-changes-param-depr.yml
- - 8793-pipx-global.yml
- - 8794-Fixing-possible-concatination-error.yaml
- - 8796-gitlab-access-token-check-mode.yml
- - 8809-pipx-new-params.yml
- - 8812-keycloak-user-federation-remove-lastSync-param-from-kc-responses.yml
- - 8814-dict-comprehension.yml
- - 8822-dict-comprehension.yml
- - 8823-keycloak-realm-key.yml
- - 8831-fix-error-when-mapper-id-is-provided.yml
- - 8833-dict-comprehension.yml
- - 8855-gio_mime_vardict.yml
- - 8856-jira_vardict.yml
- - 8858-dict-comprehension.yml
- - 8876-dict-items-loop.yml
- - 8877-keycloak_realm-sort-lists-before-change-detection.yaml
- - 8885-add-force-flag-for-nmp.yml
- - 8887-fix-one_service-unique.yml
- - 8889-refactor-one-image-modules.yml
- - 8895-fix-comprehension.yaml
- - 8897-nmcli-add-reload-and-up-down.yml
- - 8898-add-arg-to-exclude-bind-credential-from-change-check.yaml
- - 8900-ipa-hostgroup-fix-states.yml
- - 8907-fix-one-host-id.yml
- - 8908-add-gitlab-group-params.yml
- - 8909-flatpak-improve-name-parsing.yaml
- - 8917-proxmox-clean-auth.yml
- - 8920-ipa-host-fix-state.yml
- - 8923-keycloak_userprofile-fix-empty-response-when-fetching-userprofile.yml
- - 8925-atomic.yml
- - 8928-cmd-runner-10.0.0.yml
- - 8929-cmd_runner-bugfix.yml
- - 8937-add-StorageId-RedfishURI-to-disk-facts.yml
- - 8940-keycloak_userprofile-improve-diff.yml
- - 8944-django-command-fix.yml
- - 8952-password-store-lookup-create-subkey-support.yml
- - 8954-keycloak-user-federation-add-referral-parameter.yml
- - 8956-remove-capacitybytes-from-the-required-parameters_list.yml
- - 8964-cmd-runner-argformat-refactor.yml
- - 8966-dig-add-port-option.yml
- - 8970-fix-dig-multi-nameservers.yml
- - 8973-keycloak_client-add-x509-auth.yml
- - 8979-keycloak_group-fix-subgroups.yml
- - 8987-legacycrypt.yml
- - 8989-github-app-token-from-fact.yml
- - 8990.yml
- - 9010-edit-gitlab-label-color.yaml
- - 9012-dell-pwrbutton-requires-a-job-initiated-at-reboot.yml
- - 9019-onevnet-bugfix.yml
- - 9022-improve-homebrew-perf.yml
- - 9026-consul_kv-datacenter.yml
- - 9027-support-organizations-in-keycloak-realm.yml
- - 9028-bitwarden-secrets-manager-syntax-fix.yml
- - 9044-pipx-fixes.yml
- - 9047-redfish-uri-parsing.yml
- - 9052-modprobe-bugfix.yml
- - 9056-fix-one_image-modules.yml
- - 9059-redfish_command-updateuseraccounttypes.yml
- - 9060-ansible-galaxy-install-version.yml
- - 9061-cpanm-version.yml
- - 9063-django-version.yml
- - 9064-gconftool2-version.yml
- - 9066-proxmox-kvm-ciupgrade.yml
- - 9067-gio-mime-version.yml
- - 9075-add-creation-oneimage.yml
- - 9084-collection_version-importlib.yml
- - 9084-jenkins_node-add-offline-message.yml
- - 9086-gio-mime-version.yml
- - 9087-mattermost-priority.yaml
- - 9092-keycloak-clientscope-type-fix-check-mode.yml
- - 9099-proxmox-fix-insecure.yml
- - deprecate-hipchat.yml
- - deprecations.yml
- - removals.yml
- modules:
- - description: Bootc Switch and Upgrade.
- name: bootc_manage
- namespace: ''
- - description: Add, modify, and delete checks within a consul cluster.
- name: consul_agent_check
- namespace: ''
- - description: Add, modify and delete services within a consul cluster.
- name: consul_agent_service
- namespace: ''
- - description: Wrapper for C(django-admin check).
- name: django_check
- namespace: ''
- - description: Wrapper for C(django-admin createcachetable).
- name: django_createcachetable
- namespace: ''
- - description: Services manager for Homebrew.
- name: homebrew_services
- namespace: ''
- - description: Manage keytab file in FreeIPA.
- name: ipa_getkeytab
- namespace: ''
- - description: Manage Jenkins nodes.
- name: jenkins_node
- namespace: ''
- - description: Allows administration of Keycloak components via Keycloak API.
- name: keycloak_component
- namespace: ''
- - description: Allows obtaining Keycloak realm keys metadata via Keycloak API.
- name: keycloak_realm_keys_metadata_info
- namespace: ''
- - description: Allows managing Keycloak User Profiles.
- name: keycloak_userprofile
- namespace: ''
- - description: Kerberos utils for managing tickets.
- name: krb_ticket
- namespace: ''
- - description: Manages OpenNebula virtual networks.
- name: one_vnet
- namespace: ''
- - description: List Zypper repositories.
- name: zypper_repository_info
- namespace: ''
- plugins:
- filter:
- - description: Keep specific keys from dictionaries in a list.
- name: keep_keys
- namespace: null
- - description: Remove specific keys from dictionaries in a list.
- name: remove_keys
- namespace: null
- - description: Replace specific keys in a list of dictionaries.
- name: replace_keys
- namespace: null
- - description: Return input type.
- name: reveal_ansible_type
- namespace: null
- test:
- - description: Validate input type.
- name: ansible_type
- namespace: null
- release_date: '2024-11-04'
- 10.0.1:
- changes:
- bugfixes:
- - keycloak_client - fix diff by removing code that turns the attributes dict
- which contains additional settings into a list (https://github.com/ansible-collections/community.general/pull/9077).
- - keycloak_clientscope - fix diff and ``end_state`` by removing the code that
- turns the attributes dict, which contains additional config items, into
- a list (https://github.com/ansible-collections/community.general/pull/9082).
- - redfish_utils module utils - remove undocumented default applytime (https://github.com/ansible-collections/community.general/pull/9114).
- release_summary: Bugfix release for inclusion in Ansible 11.0.0rc1.
- fragments:
- - 10.0.1.yml
- - 9077-keycloak_client-fix-attributes-dict-turned-into-list.yml
- - 9082-keycloak_clientscope-fix-attributes-dict-turned-into-list.yml
- - 9114-redfish-utils-update-remove-default-applytime.yml
- release_date: '2024-11-11'
- 10.1.0:
- changes:
- bugfixes:
- - dnf_config_manager - fix hanging when prompting to import GPG keys (https://github.com/ansible-collections/community.general/pull/9124,
- https://github.com/ansible-collections/community.general/issues/8830).
- - dnf_config_manager - forces locale to ``C`` before module starts. If the
- locale was set to non-English, the output of the ``dnf config-manager``
- could not be parsed (https://github.com/ansible-collections/community.general/pull/9157,
- https://github.com/ansible-collections/community.general/issues/9046).
- - flatpak - force the locale language to ``C`` when running the flatpak command
- (https://github.com/ansible-collections/community.general/pull/9187, https://github.com/ansible-collections/community.general/issues/8883).
- - gio_mime - fix command line when determining version of ``gio`` (https://github.com/ansible-collections/community.general/pull/9171,
- https://github.com/ansible-collections/community.general/issues/9158).
- - github_key - in check mode, a faulty call to ```datetime.strftime(...)```
- was being made which generated an exception (https://github.com/ansible-collections/community.general/issues/9185).
- - homebrew_cask - allow ``+`` symbol in Homebrew cask name validation regex
- (https://github.com/ansible-collections/community.general/pull/9128).
- - keycloak_clientscope_type - sort the default and optional clientscope lists
- to improve the diff (https://github.com/ansible-collections/community.general/pull/9202).
- - slack - fail if Slack API response is not OK with error message (https://github.com/ansible-collections/community.general/pull/9198).
- deprecated_features:
- - opkg - deprecate value ``""`` for parameter ``force`` (https://github.com/ansible-collections/community.general/pull/9172).
- - redfish_utils module utils - deprecate method ``RedfishUtils._init_session()``
- (https://github.com/ansible-collections/community.general/pull/9190).
- minor_changes:
- - alternatives - add ``family`` parameter that allows to utilize the ``--family``
- option available in RedHat version of update-alternatives (https://github.com/ansible-collections/community.general/issues/5060,
- https://github.com/ansible-collections/community.general/pull/9096).
- - cloudflare_dns - add support for ``comment`` and ``tags`` (https://github.com/ansible-collections/community.general/pull/9132).
- - deps module utils - add ``deps.clear()`` to clear out previously declared
- dependencies (https://github.com/ansible-collections/community.general/pull/9179).
- - homebrew - greatly speed up module when multiple packages are passed in
- the ``name`` option (https://github.com/ansible-collections/community.general/pull/9181).
- - homebrew - remove duplicated package name validation (https://github.com/ansible-collections/community.general/pull/9076).
- - iso_extract - adds ``password`` parameter that is passed to 7z (https://github.com/ansible-collections/community.general/pull/9159).
- - launchd - add ``plist`` option for services such as sshd, where the plist
- filename doesn't match the service name (https://github.com/ansible-collections/community.general/pull/9102).
- - nmcli - add ``sriov`` parameter that enables support for SR-IOV settings
- (https://github.com/ansible-collections/community.general/pull/9168).
- - pipx - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9180).
- - pipx_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9180).
- - proxmox_template - add server side artifact fetching support (https://github.com/ansible-collections/community.general/pull/9113).
- - redfish_command - add ``update_custom_oem_header``, ``update_custom_oem_params``,
- and ``update_custom_oem_mime_type`` options (https://github.com/ansible-collections/community.general/pull/9123).
- - redfish_utils module utils - remove redundant code (https://github.com/ansible-collections/community.general/pull/9190).
- - rpm_ostree_pkg - added the options ``apply_live`` (https://github.com/ansible-collections/community.general/pull/9167).
- - rpm_ostree_pkg - added the return value ``needs_reboot`` (https://github.com/ansible-collections/community.general/pull/9167).
- - scaleway_lb - minor simplification in the code (https://github.com/ansible-collections/community.general/pull/9189).
- - ssh_config - add ``dynamicforward`` option (https://github.com/ansible-collections/community.general/pull/9192).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 10.1.0.yml
- - 5932-launchd-plist.yml
- - 7402-proxmox-template-support-server-side-artifact-fetching.yaml
- - 9076-remove-duplicated-homebrew-package-name-validation.yml
- - 9096-alternatives-add-family-parameter.yml
- - 9123-redfish-command-custom-oem-params.yml
- - 9124-dnf_config_manager.yml
- - 9128-homebrew_cask-name-regex-fix.yml
- - 9132-cloudflare_dns-comment-and-tags.yml
- - 9157-fix-dnf_config_manager-locale.yml
- - 9159-iso-extract_add_password.yml
- - 9167-rpm_ostree_pkg-apply_live.yml
- - 9168-nmcli-add-sriov-parameter.yml
- - 9171-gio-mime-fix-version.yml
- - 9172-opkg-deprecate-force-none.yml
- - 9179-deps-tests.yml
- - 9180-pipx-version.yml
- - 9181-improve-homebrew-module-performance.yml
- - 9186-fix-broken-check-mode-in-github-key.yml
- - 9187-flatpak-lang.yml
- - 9189-scalway-lb-simplify-return.yml
- - 9190-redfish-utils-unused-code.yml
- - 9198-fail-if-slack-api-response-is-not-ok-with-error-message.yml
- - 9202-keycloak_clientscope_type-sort-lists.yml
- - ssh_config_add_dynamicforward_option.yml
- modules:
- - description: Decompresses compressed files.
- name: decompress
- namespace: ''
- - description: Start a VM backup in Proxmox VE cluster.
- name: proxmox_backup
- namespace: ''
- plugins:
- filter:
- - description: Produce a list of accumulated sums of the input list contents.
- name: accumulate
- namespace: null
- release_date: '2024-12-02'
- 10.2.0:
- changes:
- bugfixes:
- - dig lookup plugin - correctly handle ``NoNameserver`` exception (https://github.com/ansible-collections/community.general/pull/9363,
- https://github.com/ansible-collections/community.general/issues/9362).
- - homebrew - fix incorrect handling of aliased homebrew modules when the alias
- is requested (https://github.com/ansible-collections/community.general/pull/9255,
- https://github.com/ansible-collections/community.general/issues/9240).
- - htpasswd - report changes when file permissions are adjusted (https://github.com/ansible-collections/community.general/issues/9485,
- https://github.com/ansible-collections/community.general/pull/9490).
- - proxmox_backup - fix incorrect key lookup in vmid permission check (https://github.com/ansible-collections/community.general/pull/9223).
- - proxmox_disk - fix async method and make ``resize_disk`` method handle errors
- correctly (https://github.com/ansible-collections/community.general/pull/9256).
- - proxmox_template - fix the wrong path called on ``proxmox_template.task_status``
- (https://github.com/ansible-collections/community.general/issues/9276, https://github.com/ansible-collections/community.general/pull/9277).
- - qubes connection plugin - fix the printing of debug information (https://github.com/ansible-collections/community.general/pull/9334).
- - redfish_utils module utils - Fix ``VerifyBiosAttributes`` command on multi
- system resource nodes (https://github.com/ansible-collections/community.general/pull/9234).
- deprecated_features:
- - atomic_container - module is deprecated and will be removed in community.general
- 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487).
- - atomic_host - module is deprecated and will be removed in community.general
- 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487).
- - atomic_image - module is deprecated and will be removed in community.general
- 13.0.0 (https://github.com/ansible-collections/community.general/pull/9487).
- - facter - module is deprecated and will be removed in community.general 12.0.0,
- use ``community.general.facter_facts`` instead (https://github.com/ansible-collections/community.general/pull/9451).
- - 'locale_gen - ``ubuntu_mode=True``, or ``mechanism=ubuntu_legacy`` is deprecated
- and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9238).
-
- '
- - pure module utils - the module utils is deprecated and will be removed from
- community.general 12.0.0. The modules using this were removed in community.general
- 3.0.0 (https://github.com/ansible-collections/community.general/pull/9432).
- - purestorage doc fragments - the doc fragment is deprecated and will be removed
- from community.general 12.0.0. The modules using this were removed in community.general
- 3.0.0 (https://github.com/ansible-collections/community.general/pull/9432).
- - sensu_check - module is deprecated and will be removed in community.general
- 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483).
- - sensu_client - module is deprecated and will be removed in community.general
- 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483).
- - sensu_handler - module is deprecated and will be removed in community.general
- 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483).
- - sensu_silence - module is deprecated and will be removed in community.general
- 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483).
- - sensu_subscription - module is deprecated and will be removed in community.general
- 13.0.0, use collection ``sensu.sensu_go`` instead (https://github.com/ansible-collections/community.general/pull/9483).
- - slack - the default value ``auto`` of the ``prepend_hash`` option is deprecated
- and will change to ``never`` in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/9443).
- - yaml callback plugin - deprecate plugin in favor of ``result_format=yaml``
- in plugin ``ansible.bulitin.default`` (https://github.com/ansible-collections/community.general/pull/9456).
- minor_changes:
- - bitwarden lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - cgroup_memory_recap callback plugin - use f-strings instead of interpolations
- or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
- - chef_databag lookup plugin - use f-strings instead of interpolations or
- ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
- - chroot connection plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - chroot connection plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9322).
- - cobbler inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - cobbler inventory plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9323).
- - collection_version lookup plugin - use f-strings instead of interpolations
- or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
- - consul_kv lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - context_demo callback plugin - use f-strings instead of interpolations or
- ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
- - counter_enabled callback plugin - use f-strings instead of interpolations
- or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
- - credstash lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - cyberarkpassword lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - cyberarkpassword lookup plugin - use f-strings instead of interpolations
- or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
- - dense callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - dependent lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - dig lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - dig lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - diy callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - dnstxt lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - dnstxt lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - doas become plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9319).
- - dsv lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - dzdo become plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9319).
- - elastic callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - etcd lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - etcd3 lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - etcd3 lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - filetree lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - from_csv filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - from_ini filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - funcd connection plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9322).
- - github_app_access_token lookup plugin - use f-strings instead of interpolations
- or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
- - gitlab_instance_variable - add support for ``raw`` variables suboption (https://github.com/ansible-collections/community.general/pull/9425).
- - gitlab_runners inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - gitlab_runners inventory plugin - use f-strings instead of interpolations
- or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
- - hiera lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - icinga2 inventory plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9323).
- - incus connection plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9322).
- - iocage connection plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9322).
- - iocage inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - iptables_state action plugin - use f-strings instead of interpolations or
- ``format`` (https://github.com/ansible-collections/community.general/pull/9318).
- - jabber callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - jail connection plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9322).
- - keycloak - add an action group for Keycloak modules to allow ``module_defaults``
- to be set for Keycloak tasks (https://github.com/ansible-collections/community.general/pull/9284).
- - keyring lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - ksu become plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9319).
- - lastpass lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - linode inventory plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9323).
- - lmdb_kv lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - lmdb_kv lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - locale_gen - invert the logic to determine ``ubuntu_mode``, making it look
- first for ``/etc/locale.gen`` (set ``ubuntu_mode`` to ``False``) and only
- then looking for ``/var/lib/locales/supported.d/`` (set ``ubuntu_mode``
- to ``True``) (https://github.com/ansible-collections/community.general/pull/9238,
- https://github.com/ansible-collections/community.general/issues/9131, https://github.com/ansible-collections/community.general/issues/8487).
- - 'locale_gen - new return value ``mechanism`` to better express the semantics
- of the ``ubuntu_mode``, with the possible values being either ``glibc``
- (``ubuntu_mode=False``) or ``ubuntu_legacy`` (``ubuntu_mode=True``) (https://github.com/ansible-collections/community.general/pull/9238).
-
- '
- - log_plays callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - loganalytics callback plugin - use f-strings instead of interpolations or
- ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
- - logdna callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - logentries callback plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - logentries callback plugin - use f-strings instead of interpolations or
- ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
- - lxc connection plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9322).
- - lxd connection plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9322).
- - lxd inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - lxd inventory plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9323).
- - machinectl become plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9319).
- - mail callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - manageiq_alert_profiles - improve handling of parameter requirements (https://github.com/ansible-collections/community.general/pull/9449).
- - manifold lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - manifold lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - memcached cache plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9320).
- - merge_variables lookup plugin - use f-strings instead of interpolations
- or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
- - nmap inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - nmap inventory plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9323).
- - nrdp callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - onepassword lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - onepassword lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - onepassword_doc lookup plugin - use f-strings instead of interpolations
- or ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
- - online inventory plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9323).
- - opennebula inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - opennebula inventory plugin - use f-strings instead of interpolations or
- ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
- - opentelemetry callback plugin - remove code handling Python versions prior
- to 3.7 (https://github.com/ansible-collections/community.general/pull/9482).
- - opentelemetry callback plugin - remove code handling Python versions prior
- to 3.7 (https://github.com/ansible-collections/community.general/pull/9503).
- - opentelemetry callback plugin - use f-strings instead of interpolations
- or ``format`` (https://github.com/ansible-collections/community.general/pull/9321).
- - pacemaker_cluster - remove unused code (https://github.com/ansible-collections/community.general/pull/9471).
- - pacemaker_cluster - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/9471).
- - passwordstore lookup plugin - use f-strings instead of interpolations or
- ``format`` (https://github.com/ansible-collections/community.general/pull/9324).
- - pbrun become plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9319).
- - pfexec become plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9319).
- - pmrun become plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9319).
- - proxmox inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - proxmox inventory plugin - strip whitespace from ``user``, ``token_id``,
- and ``token_secret`` (https://github.com/ansible-collections/community.general/issues/9227,
- https://github.com/ansible-collections/community.general/pull/9228/).
- - proxmox inventory plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9323).
- - proxmox module utils - add method ``api_task_complete`` that can wait for
- task completion and return error message (https://github.com/ansible-collections/community.general/pull/9256).
- - proxmox_backup - refactor permission checking to improve code readability
- and maintainability (https://github.com/ansible-collections/community.general/pull/9239).
- - qubes connection plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9322).
- - random_pet lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - redis cache plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - redis cache plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9320).
- - redis lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - revbitspss lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - saltstack connection plugin - use f-strings instead of interpolations or
- ``format`` (https://github.com/ansible-collections/community.general/pull/9322).
- - say callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - scaleway inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - scaleway inventory plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9323).
- - selective callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - sesu become plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9319).
- - shelvefile lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - shutdown action plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - shutdown action plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9318).
- - slack callback plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - slack callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - splunk callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - stackpath_compute inventory plugin - use f-strings instead of interpolations
- or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
- - sudosu become plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9319).
- - timestamp callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - to_ini filter plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - tss lookup plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - tss lookup plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9324).
- - unixy callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - virtualbox inventory plugin - clean up string conversions (https://github.com/ansible-collections/community.general/pull/9379).
- - virtualbox inventory plugin - use f-strings instead of interpolations or
- ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
- - xbps - add ``root`` and ``repository`` options to enable bootstrapping new
- void installations (https://github.com/ansible-collections/community.general/pull/9174).
- - xen_orchestra inventory plugin - use f-strings instead of interpolations
- or ``format`` (https://github.com/ansible-collections/community.general/pull/9323).
- - xfconf - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9226).
- - xfconf_info - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9226).
- - yaml callback plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9321).
- - zone connection plugin - use f-strings instead of interpolations or ``format``
- (https://github.com/ansible-collections/community.general/pull/9322).
- - zypper - add ``quiet`` option (https://github.com/ansible-collections/community.general/pull/9270).
- - zypper - add ``simple_errors`` option (https://github.com/ansible-collections/community.general/pull/9270).
- release_summary: Regular bugfix and feature release.
- security_fixes:
- - keycloak_authentication - API calls did not properly set the ``priority``
- during update resulting in incorrectly sorted authentication flows. This
- apparently only affects Keycloak 25 or newer (https://github.com/ansible-collections/community.general/pull/9263).
- fragments:
- - 10.2.0.yml
- - 9174-xbps-support-rootdir-and-repository.yml
- - 9223-proxmox-backup-bugfixes.yml
- - 9226-xfconf-version.yml
- - 9228-fix-issue-header.yml
- - 9234-fix-verify-bios-attributes-multi-system.yml
- - 9238-locale-gen-rewrite.yml
- - 9239-proxmox-backup-refactor.yml
- - 9255-fix-handling-of-aliased-homebrew-packages.yml
- - 9256-proxmox_disk-fix-async-method-of-resize_disk.yml
- - 9263-kc_authentication-api-priority.yaml
- - 9270-zypper-add-simple_errors.yaml
- - 9277-proxmox_template-fix-the-wrong-path-called-on-proxmox_template.task_status.yaml
- - 9284-add-keycloak-action-group.yml
- - 9318-fstr-actionplugins.yml
- - 9319-fstr-become-plugins.yml
- - 9320-fstr-cache-plugins.yml
- - 9321-fstr-callback-plugins.yml
- - 9322-fstr-connection-plugins.yml
- - 9323-fstr-inventory-plugins.yml
- - 9324-fstr-lookup-plugins.yml
- - 9334-qubes-conn.yml
- - 9363-dig-nonameservers.yml
- - 9379-refactor.yml
- - 9387-pacemaker-cluster-cmd.yml
- - 9425-gitlab-instance-raw-variable.yml
- - 9432-deprecate-pure.yml
- - 9443-slack-prepend_hash.yml
- - 9449-manageiq-alert-profiles-reqs.yml
- - 9451-facter-deprecation.yml
- - 9456-yaml-callback-deprecation.yml
- - 9482-opentelemetry-python-37.yml
- - 9483-sensu-deprecation.yml
- - 9487-atomic-deprecation.yml
- - 9490-htpasswd-permissions.yml
- - 9503-opentelemetry-remove-unused-code.yml
- modules:
- - description: Manages Android SDK packages.
- name: android_sdk
- namespace: ''
- - description: Use the Modify-Increment LDAP V3 feature to increment an attribute
- value.
- name: ldap_inc
- namespace: ''
- - description: C(systemd)'s C(systemd-creds decrypt) plugin.
- name: systemd_creds_decrypt
- namespace: ''
- - description: C(systemd)'s C(systemd-creds encrypt) plugin.
- name: systemd_creds_encrypt
- namespace: ''
- plugins:
- inventory:
- - description: iocage inventory source.
- name: iocage
- namespace: null
- release_date: '2024-12-31'
- 10.3.0:
- changes:
- bugfixes:
- - homebrew - fix incorrect handling of homebrew modules when a tap is requested
- (https://github.com/ansible-collections/community.general/pull/9546, https://github.com/ansible-collections/community.general/issues/9533).
- - iocage inventory plugin - the plugin parses the IP4 tab of the jails list
- and put the elements into the new variable ``iocage_ip4_dict``. In multiple
- interface format the variable ``iocage_ip4`` keeps the comma-separated list
- of IP4 (https://github.com/ansible-collections/community.general/issues/9538).
- - pipx - honor option ``global`` when ``state=latest`` (https://github.com/ansible-collections/community.general/pull/9623).
- - proxmox - fixes idempotency of template conversions (https://github.com/ansible-collections/community.general/pull/9225,
- https://github.com/ansible-collections/community.general/issues/8811).
- - proxmox - fixes incorrect parsing for bind-only mounts (https://github.com/ansible-collections/community.general/pull/9225,
- https://github.com/ansible-collections/community.general/issues/8982).
- - proxmox - fixes issues with disk_volume variable (https://github.com/ansible-collections/community.general/pull/9225,
- https://github.com/ansible-collections/community.general/issues/9065).
- - proxmox module utils - fixes ignoring of ``choose_first_if_multiple`` argument
- in ``get_vmid`` (https://github.com/ansible-collections/community.general/pull/9225).
- - 'redhat_subscription - do not try to unsubscribe (i.e. remove subscriptions)
-
- when unregistering a system: newer versions of subscription-manager, as
-
- available in EL 10 and Fedora 41+, do not support entitlements anymore,
- and
-
- thus unsubscribing will fail
-
- (https://github.com/ansible-collections/community.general/pull/9578).
-
- '
- deprecated_features:
- - 'MH module utils - attribute ``debug`` definition in subclasses of MH is
- now deprecated, as that name will become a delegation to ``AnsibleModule``
- in community.general 12.0.0, and any such attribute will be overridden by
- that delegation in that version (https://github.com/ansible-collections/community.general/pull/9577).
-
- '
- - proxmox - removes default value ``false`` of ``update`` parameter. This
- will be changed to a default of ``true`` in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/9225).
- minor_changes:
- - MH module utils - delegate ``debug`` to the underlying ``AnsibleModule``
- instance or issues a warning if an attribute already exists with that name
- (https://github.com/ansible-collections/community.general/pull/9577).
- - apache2_mod_proxy - better handling regexp extraction (https://github.com/ansible-collections/community.general/pull/9609).
- - apache2_mod_proxy - change type of ``state`` to a list of strings. No change
- for the users (https://github.com/ansible-collections/community.general/pull/9600).
- - apache2_mod_proxy - improve readability when using results from ``fecth_url()``
- (https://github.com/ansible-collections/community.general/pull/9608).
- - apache2_mod_proxy - refactor repeated code into method (https://github.com/ansible-collections/community.general/pull/9599).
- - apache2_mod_proxy - remove unused parameter and code from ``Balancer`` constructor
- (https://github.com/ansible-collections/community.general/pull/9614).
- - apache2_mod_proxy - simplified and improved string manipulation (https://github.com/ansible-collections/community.general/pull/9614).
- - apache2_mod_proxy - use ``deps`` to handle dependencies (https://github.com/ansible-collections/community.general/pull/9612).
- - cgroup_memory_recap callback plugin - adjust standard preamble for Python
- 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - chroot connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - cloud_init_data_facts - open file using ``open()`` as a context manager
- (https://github.com/ansible-collections/community.general/pull/9579).
- - cobbler inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - context_demo callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - counter filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - counter_enabled callback plugin - adjust standard preamble for Python 3
- (https://github.com/ansible-collections/community.general/pull/9583).
- - cpanm - enable usage of option ``--with-recommends`` (https://github.com/ansible-collections/community.general/issues/9554,
- https://github.com/ansible-collections/community.general/pull/9555).
- - cpanm - enable usage of option ``--with-suggests`` (https://github.com/ansible-collections/community.general/pull/9555).
- - crc32 filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - cronvar - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
- - crypttab - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
- - default_without_diff callback plugin - adjust standard preamble for Python
- 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - dense callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - dict filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - dict_kv filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - diy callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - doas become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - dzdo become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - elastic callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - from_csv filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - from_ini filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - funcd connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - gitlab_runners inventory plugin - adjust standard preamble for Python 3
- (https://github.com/ansible-collections/community.general/pull/9584).
- - groupby_as_dict filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - hashids filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - icinga2 inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - incus connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - iocage connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - iocage inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - iocage inventory plugin - the new parameter ``sudo`` of the plugin lets
- the command ``iocage list -l`` to run as root on the iocage host. This is
- needed to get the IPv4 of a running DHCP jail (https://github.com/ansible-collections/community.general/issues/9572,
- https://github.com/ansible-collections/community.general/pull/9573).
- - iptables_state action plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - jabber callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - jail connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - jc filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - jira - transition operation now has ``status_id`` to directly reference
- wanted transition (https://github.com/ansible-collections/community.general/pull/9602).
- - json_query filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - keep_keys filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - keycloak_* modules - ``refresh_token`` parameter added. When multiple authentication
- parameters are provided (``token``, ``refresh_token``, and ``auth_username``/``auth_password``),
- modules will now automatically retry requests upon authentication errors
- (401), using in order the token, refresh token, and username/password (https://github.com/ansible-collections/community.general/pull/9494).
- - known_hosts - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
- - ksu become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - linode inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - lists filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - lists_mergeby filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - log_plays callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - loganalytics callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - logdna callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - logentries callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - logstash callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - lxc connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - lxd connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - lxd inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - machinectl become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - mail callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - memcached cache plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - nmap inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - nmcli - add a option ``fail_over_mac`` (https://github.com/ansible-collections/community.general/issues/9570,
- https://github.com/ansible-collections/community.general/pull/9571).
- - nrdp callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - null callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - one_template - adds ``filter`` option for retrieving templates which are
- not owned by the user (https://github.com/ansible-collections/community.general/pull/9547,
- https://github.com/ansible-collections/community.general/issues/9278).
- - online inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - opennebula inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - opentelemetry callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - parted - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
- - pbrun become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - pfexec become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - pickle cache plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - pmrun become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - proxmox - refactors the proxmox module (https://github.com/ansible-collections/community.general/pull/9225).
- - proxmox inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - proxmox_pct_remote connection plugin - adjust standard preamble for Python
- 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - proxmox_template - add support for checksum validation with new options
- ``checksum_algorithm`` and ``checksum`` (https://github.com/ansible-collections/community.general/issues/9553,
- https://github.com/ansible-collections/community.general/pull/9601).
- - pulp_repo - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
- - qubes connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - random_mac filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - redfish_info - add command ``GetAccountServiceConfig`` to get full information
- about AccountService configuration (https://github.com/ansible-collections/community.general/pull/9403).
- - redhat_subscription - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
- - redis cache plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - remove_keys filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - replace_keys filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - reveal_ansible_type filter plugin - adjust standard preamble for Python
- 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - run0 become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - saltstack connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - say callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - scaleway inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - selective callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - sesu become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - shutdown action plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - slack callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - snap - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9598).
- - snap_alias - add return value ``version`` (https://github.com/ansible-collections/community.general/pull/9598).
- - solaris_zone - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
- - sorcery - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
- - splunk callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - stackpath_compute inventory plugin - adjust standard preamble for Python
- 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - sudosu become plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - sumologic callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - syslog_json callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - time filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - timestamp callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - timezone - open file using ``open()`` as a context manager (https://github.com/ansible-collections/community.general/pull/9579).
- - to_ini filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - ufw - add support for ``vrrp`` protocol (https://github.com/ansible-collections/community.general/issues/9562,
- https://github.com/ansible-collections/community.general/pull/9582).
- - unicode_normalize filter plugin - adjust standard preamble for Python 3
- (https://github.com/ansible-collections/community.general/pull/9585).
- - unixy callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - version_sort filter plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9585).
- - virtualbox inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - xen_orchestra inventory plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- - yaml cache plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - yaml callback plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9583).
- - zone connection plugin - adjust standard preamble for Python 3 (https://github.com/ansible-collections/community.general/pull/9584).
- release_summary: Regular bugfix and feature release.
- security_fixes:
- - keycloak_client - Sanitize ``saml.encryption.private.key`` so it does not
- show in the logs (https://github.com/ansible-collections/community.general/pull/9621).
- fragments:
- - 10.3.0.yml
- - 9225-proxmox-module-refactoring.yml
- - 9403-redfish-add-get-accountservice.yml
- - 9494-keycloak-modules-retry-request-on-authentication-error.yaml
- - 9539-iocage-inventory-dhcp.yml
- - 9546-fix-handling-of-tap-homebrew-packages.yml
- - 9547-one_template-filter.yml
- - 9554-add-cpanm-option_with-recommends-and-suggests.yml
- - 9570-feat-nmcli-add-fail-over-mac-parameter.yml
- - 9573-iocage-inventory-sudo.yml
- - 9577-mh-delegate-debug.yml
- - 9578-redhat_subscription-no-remove-on-unregister.yml
- - 9579-with-open.yml
- - 9582-add-support-for-vrrp.yml
- - 9583-py3-imports-actionbecomecachecallback.yml
- - 9584-py3-imports-connectioninventory.yml
- - 9585-py3-imports-filter.yml
- - 9586-allow-transition-id-jira.yml
- - 9598-snap-version.yml
- - 9599-apache2-mod-proxy-revamp1.yml
- - 9600-apache2-mod-proxy-revamp2.yml
- - 9601-proxmox-template-support-for-checksums.yml
- - 9608-apache2-mod-proxy-revamp3.yml
- - 9609-apache2-mod-proxy-revamp4.yml
- - 9612-apache2-mod-proxy-revamp5.yml
- - 9614-apache2-mod-proxy-revamp7.yml
- - 9621-keycloak_client-sanitize-saml-encryption-key.yml
- - 9623-pipx-global-latest.yml
- modules:
- - description: Retrieve information on Proxmox scheduled backups.
- name: proxmox_backup_info
- namespace: ''
- plugins:
- connection:
- - description: Run tasks in Proxmox LXC container instances using pct CLI
- via SSH.
- name: proxmox_pct_remote
- namespace: null
- filter:
- - description: Create a JSON patch by comparing two JSON files.
- name: json_diff
- namespace: null
- - description: Apply a JSON-Patch (RFC 6902) operation to an object.
- name: json_patch
- namespace: null
- - description: Apply JSON-Patch (RFC 6902) operations to an object.
- name: json_patch_recipe
- namespace: null
- lookup:
- - description: Fetch SSH keys stored in 1Password.
- name: onepassword_ssh_key
- namespace: null
- release_date: '2025-01-27'
- 10.3.1:
- changes:
- bugfixes:
- - cloudflare_dns - fix crash when deleting a DNS record or when updating a
- record with ``solo=true`` (https://github.com/ansible-collections/community.general/issues/9652,
- https://github.com/ansible-collections/community.general/pull/9649).
- - homebrew - make package name parsing more resilient (https://github.com/ansible-collections/community.general/pull/9665,
- https://github.com/ansible-collections/community.general/issues/9641).
- - keycloak module utils - replaces missing return in get_role_composites method
- which caused it to return None instead of composite roles (https://github.com/ansible-collections/community.general/issues/9678,
- https://github.com/ansible-collections/community.general/pull/9691).
- - keycloak_client - fix and improve existing tests. The module showed a diff
- without actual changes, solved by improving the ``normalise_cr()`` function
- (https://github.com/ansible-collections/community.general/pull/9644).
- - proxmox - adds the ``pubkey`` parameter (back to) the ``update`` state (https://github.com/ansible-collections/community.general/issues/9642,
- https://github.com/ansible-collections/community.general/pull/9645).
- - proxmox - fixes a typo in the translation of the ``pubkey`` parameter to
- proxmox' ``ssh-public-keys`` (https://github.com/ansible-collections/community.general/issues/9642,
- https://github.com/ansible-collections/community.general/pull/9645).
- - xml - ensure file descriptor is closed (https://github.com/ansible-collections/community.general/pull/9695).
- minor_changes:
- - onepassword_ssh_key - refactor to move code to lookup class (https://github.com/ansible-collections/community.general/pull/9633).
- release_summary: Bugfix release.
- fragments:
- - 10.3.1.yml
- - 9633-onepassword_ssh_key.yml
- - 9644-kc_client-test-improvement-and-fix.yaml
- - 9645-proxmox-fix-pubkey.yml
- - 9649-cloudflare_dns-fix-crash-when-deleting-record.yml
- - 9665-more-resilient-handling-of-homebrew-packages-names.yml
- - 9691-keycloak-module-utils-replace-missing-return-in-get_role_composites.yml
- - 9695-xml-close-file.yml
- release_date: '2025-02-10'
- 10.4.0:
- changes:
- bugfixes:
- - apache2_mod_proxy - make compatible with Python 3 (https://github.com/ansible-collections/community.general/pull/9762).
- - apache2_mod_proxy - passing the cluster's page as referer for the member's
- pages. This makes the module actually work again for halfway modern Apache
- versions. According to some comments founds on the net the referer was required
- since at least 2019 for some versions of Apache 2 (https://github.com/ansible-collections/community.general/pull/9762).
- - 'elasticsearch_plugin - fix ``ERROR: D is not a recognized option`` issue
- when configuring proxy settings (https://github.com/ansible-collections/community.general/pull/9774,
- https://github.com/ansible-collections/community.general/issues/9773).'
- - ipa_host - module revoked existing host certificates even if ``user_certificate``
- was not given (https://github.com/ansible-collections/community.general/pull/9694).
- - keycloak_client - in check mode, detect whether the lists in before client
- (for example redirect URI list) contain items that the lists in the desired
- client do not contain (https://github.com/ansible-collections/community.general/pull/9739).
- - lldp - fix crash caused by certain lldpctl output where an attribute is
- defined as branch and leaf (https://github.com/ansible-collections/community.general/pull/9657).
- - onepassword_doc lookup plugin - ensure that 1Password Connect support also
- works for this plugin (https://github.com/ansible-collections/community.general/pull/9625).
- - passwordstore lookup plugin - fix subkey creation even when ``create=false``
- (https://github.com/ansible-collections/community.general/issues/9105, https://github.com/ansible-collections/community.general/pull/9106).
- - 'proxmox inventory plugin - plugin did not update cache correctly after
- ``meta: refresh_inventory`` (https://github.com/ansible-collections/community.general/issues/9710,
- https://github.com/ansible-collections/community.general/pull/9760).'
- - 'redhat_subscription - use the "enable_content" option (when available)
- when
-
- registering using D-Bus, to ensure that subscription-manager enables the
-
- content on registration; this is particular important on EL 10+ and Fedora
-
- 41+
-
- (https://github.com/ansible-collections/community.general/pull/9778).
-
- '
- - zfs - fix handling of multi-line values of user-defined ZFS properties (https://github.com/ansible-collections/community.general/pull/6264).
- - zfs_facts - parameter ``type`` now accepts multple values as documented
- (https://github.com/ansible-collections/community.general/issues/5909, https://github.com/ansible-collections/community.general/pull/9697).
- deprecated_features:
- - profitbricks - module is deprecated and will be removed in community.general
- 11.0.0 (https://github.com/ansible-collections/community.general/pull/9733).
- - profitbricks_datacenter - module is deprecated and will be removed in community.general
- 11.0.0 (https://github.com/ansible-collections/community.general/pull/9733).
- - profitbricks_nic - module is deprecated and will be removed in community.general
- 11.0.0 (https://github.com/ansible-collections/community.general/pull/9733).
- - profitbricks_volume - module is deprecated and will be removed in community.general
- 11.0.0 (https://github.com/ansible-collections/community.general/pull/9733).
- - profitbricks_volume_attachments - module is deprecated and will be removed
- in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/9733).
- minor_changes:
- - bitwarden lookup plugin - add new option ``collection_name`` to filter results
- by collection name, and new option ``result_count`` to validate number of
- results (https://github.com/ansible-collections/community.general/pull/9728).
- - incus connection plugin - adds ``remote_user`` and ``incus_become_method``
- parameters for allowing a non-root user to connect to an Incus instance
- (https://github.com/ansible-collections/community.general/pull/9743).
- - iocage inventory plugin - the new parameter ``hooks_results`` of the plugin
- is a list of files inside a jail that provide configuration parameters for
- the inventory. The inventory plugin reads the files from the jails and put
- the contents into the items of created variable ``iocage_hooks`` (https://github.com/ansible-collections/community.general/issues/9650,
- https://github.com/ansible-collections/community.general/pull/9651).
- - jira - adds ``client_cert`` and ``client_key`` parameters for supporting
- client certificate authentification when connecting to Jira (https://github.com/ansible-collections/community.general/pull/9753).
- - lldp - adds ``multivalues`` parameter to control behavior when lldpctl outputs
- an attribute multiple times (https://github.com/ansible-collections/community.general/pull/9657).
- - lvg - add ``remove_extra_pvs`` parameter to control if ansible should remove
- physical volumes which are not in the ``pvs`` parameter (https://github.com/ansible-collections/community.general/pull/9698).
- - lxd connection plugin - adds ``remote_user`` and ``lxd_become_method`` parameters
- for allowing a non-root user to connect to an LXD instance (https://github.com/ansible-collections/community.general/pull/9659).
- - nmcli - adds VRF support with new ``type`` value ``vrf`` and new ``slave_type``
- value ``vrf`` as well as new ``table`` parameter (https://github.com/ansible-collections/community.general/pull/9658,
- https://github.com/ansible-collections/community.general/issues/8014).
- - proxmox_kvm - allow hibernation and suspending of VMs (https://github.com/ansible-collections/community.general/issues/9620,
- https://github.com/ansible-collections/community.general/pull/9653).
- - redfish_command - add ``PowerFullPowerCycle`` to power command options (https://github.com/ansible-collections/community.general/pull/9729).
- - ssh_config - add ``other_options`` option (https://github.com/ansible-collections/community.general/issues/8053,
- https://github.com/ansible-collections/community.general/pull/9684).
- - xen_orchestra inventory plugin - add ``use_vm_uuid`` and ``use_host_uuid``
- boolean options to allow switching over to using VM/Xen name labels instead
- of UUIDs as item names (https://github.com/ansible-collections/community.general/pull/9787).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 10.4.0.yaml
- - 6264-zfs-multiline-property-value.yml
- - 9106-passwordstore-fix-subkey-creation-even-when-create-==-false.yml
- - 9625-onepassword_doc.yml
- - 9651-iocage-inventory-hooks.yml
- - 9653-proxmox-kvm-allow-vm-hibernation.yml
- - 9657-lldp-handling-attributes-defined-multiple-times.yml
- - 9658-add-vrf-commands-to-nmcli-module.yml
- - 9659-lxd_connection-nonroot-user.yml
- - 9694-ipa-host-certificate-revoked.yml
- - 9697-zfs-facts-type.yml
- - 9698-lvg-remove-extra-pvs-parameter.yml
- - 9728-bitwarden-collection-name-filter.yml
- - 9729-redfish-fullpowercycle-command.yml
- - 9733-profitbrick-deprecation.yml
- - 9739-keycloak_client-compare-before-desired-directly.yml
- - 9743-incus_connection-nonroot-user.yml
- - 9753-jira-add-client-certificate-auth.yml
- - 9760-proxmox-inventory.yml
- - 9762-apache2_mod_proxy.yml
- - 9774-fix-elasticsearch_plugin-proxy-settings.yml
- - 9778-redhat_subscription-ensure-to-enable-content.yml
- - 9787-xoa_allow_using_names_in_inventory.yml
- - ssh_config_add_other_options.yml
- modules:
- - description: Gather C(systemd) unit info.
- name: systemd_info
- namespace: ''
- release_date: '2025-02-24'
- 10.5.0:
- changes:
- bugfixes:
- - cloudlare_dns - handle exhausted response stream in case of HTTP errors
- to show nice error message to the user (https://github.com/ansible-collections/community.general/issues/9782,
- https://github.com/ansible-collections/community.general/pull/9818).
- - dnf_versionlock - add support for dnf5 (https://github.com/ansible-collections/community.general/issues/9556).
- - homebrew - fix crash when package names include tap (https://github.com/ansible-collections/community.general/issues/9777,
- https://github.com/ansible-collections/community.general/pull/9803).
- - homebrew_cask - handle unusual brew version strings (https://github.com/ansible-collections/community.general/issues/8432,
- https://github.com/ansible-collections/community.general/pull/9881).
- - nmcli - enable changing only the order of DNS servers or search suffixes
- (https://github.com/ansible-collections/community.general/issues/8724, https://github.com/ansible-collections/community.general/pull/9880).
- - proxmox - add missing key selection of ``'status'`` key to ``get_lxc_status``
- (https://github.com/ansible-collections/community.general/issues/9696, https://github.com/ansible-collections/community.general/pull/9809).
- - proxmox_vm_info - the module no longer expects that the key ``template``
- exists in a dictionary returned by Proxmox (https://github.com/ansible-collections/community.general/issues/9875,
- https://github.com/ansible-collections/community.general/pull/9910).
- - sudoers - display stdout and stderr raised while failed validation (https://github.com/ansible-collections/community.general/issues/9674,
- https://github.com/ansible-collections/community.general/pull/9871).
- minor_changes:
- - CmdRunner module utils - the convenience method ``cmd_runner_fmt.as_fixed()``
- now accepts multiple arguments as a list (https://github.com/ansible-collections/community.general/pull/9893).
- - apache2_mod_proxy - code simplification, no change in functionality (https://github.com/ansible-collections/community.general/pull/9457).
- - consul_token - fix idempotency when ``policies`` or ``roles`` are supplied
- by name (https://github.com/ansible-collections/community.general/issues/9841,
- https://github.com/ansible-collections/community.general/pull/9845).
- - keycloak_realm - remove ID requirement when creating a realm to allow Keycloak
- generating its own realm ID (https://github.com/ansible-collections/community.general/pull/9768).
- - nmap inventory plugin - adds ``dns_servers`` option for specifying DNS servers
- for name resolution. Accepts hostnames or IP addresses in the same format
- as the ``exclude`` option (https://github.com/ansible-collections/community.general/pull/9849).
- - proxmox_kvm - add missing audio hardware device handling (https://github.com/ansible-collections/community.general/issues/5192,
- https://github.com/ansible-collections/community.general/pull/9847).
- - redfish_config - add command ``SetPowerRestorePolicy`` to set the desired
- power state of the system when power is restored (https://github.com/ansible-collections/community.general/pull/9837).
- - redfish_info - add command ``GetPowerRestorePolicy`` to get the desired
- power state of the system when power is restored (https://github.com/ansible-collections/community.general/pull/9824).
- - rocketchat - option ``is_pre740`` has been added to control the format of
- the payload. For Rocket.Chat 7.4.0 or newer, it must be set to ``false``
- (https://github.com/ansible-collections/community.general/pull/9882).
- - slack callback plugin - add ``http_agent`` option to enable the user to
- set a custom user agent for slack callback plugin (https://github.com/ansible-collections/community.general/issues/9813,
- https://github.com/ansible-collections/community.general/pull/9836).
- - systemd_info - add wildcard expression support in ``unitname`` option (https://github.com/ansible-collections/community.general/pull/9821).
- - systemd_info - extend support to timer units (https://github.com/ansible-collections/community.general/pull/9891).
- - vmadm - add new options ``flexible_disk_size`` and ``owner_uuid`` (https://github.com/ansible-collections/community.general/pull/9892).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 10.5.0.yml
- - 9457-apache2-mod-proxy-revamp.yml
- - 9768-keycloak_realm-remove-id-requirement.yaml
- - 9777-homebrew-fix-crash-when-packages-include-tap.yml
- - 9809-proxmox-fix-status-getter.yml
- - 9818-cloudflare-dns-exhausted-response.yml
- - 9821-systemd_info-add-wildcards.yml
- - 9824-redfish-implement-obtaining-powerrestorepolicy.yml
- - 9836-option-for-http-agent-for-user-to-callback-slack.yml
- - 9837-redfish-implement-setting-powerrestorepolicy.yml
- - 9845-consul_token_idempotency.yml
- - 9847-Adding_audio_device-support_to_proxmox_kvm.yml
- - 9849-nmap_dns_servers.yml
- - 9875-proxmox-dont-expect-key-template-to-exist.yml
- - 9880-nmcli-fix-reorder-same-dns-nameservers-search-suffixes.yml
- - 9882-fix-payload-to-match-rocketchat-740-requirement.yml
- - 9891-systemd_info-add_timer.yml
- - 9892-vmadm-add-new-options.yml
- - 9893-cmdrunner-as-fixed-args.yml
- - dnf_versionlock.yml
- - homebrew_cask.yml
- - sudoers.yml
- modules:
- - description: Manage pacemaker resources.
- name: pacemaker_resource
- namespace: ''
- release_date: '2025-03-24'
- 10.6.0:
- changes:
- bugfixes:
- - dependent look plugin - make compatible with ansible-core's Data Tagging
- feature (https://github.com/ansible-collections/community.general/pull/9833).
- - diy callback plugin - make compatible with ansible-core's Data Tagging feature
- (https://github.com/ansible-collections/community.general/pull/9833).
- - "github_deploy_key - check that key really exists on 422\_to avoid masking\
- \ other errors (https://github.com/ansible-collections/community.general/issues/6718,\
- \ https://github.com/ansible-collections/community.general/pull/10011)."
- - hashids and unicode_normalize filter plugins - avoid deprecated ``AnsibleFilterTypeError``
- on ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/9992).
- - homebrew - emit a useful error message if ``brew info`` reports a package
- tap is ``null`` (https://github.com/ansible-collections/community.general/pull/10013,
- https://github.com/ansible-collections/community.general/issues/10012).
- - java_cert - the module no longer fails if the optional parameters ``pkcs12_alias``
- and ``cert_alias`` are not provided (https://github.com/ansible-collections/community.general/pull/9970).
- - keycloak_authentication - fix authentification config duplication for Keycloak
- < 26.2.0 (https://github.com/ansible-collections/community.general/pull/9987).
- - keycloak_client - fix the idempotency regression by normalizing the Keycloak
- response for ``after_client`` (https://github.com/ansible-collections/community.general/issues/9905,
- https://github.com/ansible-collections/community.general/pull/9976).
- - proxmox inventory plugin - fix ``ansible_host`` staying empty for certain
- Proxmox nodes (https://github.com/ansible-collections/community.general/issues/5906,
- https://github.com/ansible-collections/community.general/pull/9952).
- - proxmox_disk - fail gracefully if ``storage`` is required but not provided
- by the user (https://github.com/ansible-collections/community.general/issues/9941,
- https://github.com/ansible-collections/community.general/pull/9963).
- - reveal_ansible_type filter plugin and ansible_type test plugin - make compatible
- with ansible-core's Data Tagging feature (https://github.com/ansible-collections/community.general/pull/9833).
- - sysrc - no longer always reporting ``changed=true`` when ``state=absent``.
- This fixes the method ``exists()`` (https://github.com/ansible-collections/community.general/issues/10004,
- https://github.com/ansible-collections/community.general/pull/10005).
- - yaml callback plugin - use ansible-core internals to avoid breakage with
- Data Tagging (https://github.com/ansible-collections/community.general/pull/9833).
- deprecated_features:
- - manifold lookup plugin - plugin is deprecated and will be removed in community.general
- 11.0.0 (https://github.com/ansible-collections/community.general/pull/10028).
- - stackpath_compute inventory plugin - plugin is deprecated and will be removed
- in community.general 11.0.0 (https://github.com/ansible-collections/community.general/pull/10026).
- known_issues:
- - reveal_ansible_type filter plugin and ansible_type test plugin - note that
- ansible-core's Data Tagging feature implements new aliases, such as ``_AnsibleTaggedStr``
- for ``str``, ``_AnsibleTaggedInt`` for ``int``, and ``_AnsibleTaggedFloat``
- for ``float`` (https://github.com/ansible-collections/community.general/pull/9833).
- minor_changes:
- - apache2_module - added workaround for new PHP module name, from ``php7_module``
- to ``php_module`` (https://github.com/ansible-collections/community.general/pull/9951).
- - gitlab_project - add option ``build_timeout`` (https://github.com/ansible-collections/community.general/pull/9960).
- - gitlab_project_members - extend choices parameter ``access_level`` by missing
- upstream valid value ``owner`` (https://github.com/ansible-collections/community.general/pull/9953).
- - 'hpilo_boot - add option to get an idempotent behavior while powering on
- server, resulting in success instead of failure when using ``state: boot_once``
- option (https://github.com/ansible-collections/community.general/pull/9646).'
- - idrac_redfish_command, idrac_redfish_config, idrac_redfish_info - add ``validate_certs``,
- ``ca_path``, and ``ciphers`` options to configure TLS/SSL (https://github.com/ansible-collections/community.general/issues/3686,
- https://github.com/ansible-collections/community.general/pull/9964).
- - ilo_redfish_command, ilo_redfish_config, ilo_redfish_info - add ``validate_certs``,
- ``ca_path``, and ``ciphers`` options to configure TLS/SSL (https://github.com/ansible-collections/community.general/issues/3686,
- https://github.com/ansible-collections/community.general/pull/9964).
- - keycloak module_utils - user groups can now be referenced by their name,
- like ``staff``, or their path, like ``/staff/engineering``. The path syntax
- allows users to reference subgroups, which is not possible otherwise (https://github.com/ansible-collections/community.general/pull/9898).
- - keycloak_user module - user groups can now be referenced by their name,
- like ``staff``, or their path, like ``/staff/engineering``. The path syntax
- allows users to reference subgroups, which is not possible otherwise (https://github.com/ansible-collections/community.general/pull/9898).
- - nmcli - add support for Infiniband MAC setting when ``type`` is ``infiniband``
- (https://github.com/ansible-collections/community.general/pull/9962).
- - 'one_vm - update allowed values for ``updateconf`` to include new parameters
- as per the latest OpenNebula API documentation.
-
- Added parameters:
-
-
- * ``OS``: ``FIRMWARE``;
-
- * ``CPU_MODEL``: ``MODEL``, ``FEATURES``;
-
- * ``FEATURES``: ``VIRTIO_BLK_QUEUES``, ``VIRTIO_SCSI_QUEUES``, ``IOTHREADS``;
-
- * ``GRAPHICS``: ``PORT``, ``COMMAND``;
-
- * ``VIDEO``: ``ATS``, ``IOMMU``, ``RESOLUTION``, ``TYPE``, ``VRAM``;
-
- * ``RAW``: ``VALIDATE``;
-
- * ``BACKUP_CONFIG``: ``FS_FREEZE``, ``KEEP_LAST``, ``BACKUP_VOLATILE``,
- ``MODE``, ``INCREMENT_MODE``.
-
-
- (https://github.com/ansible-collections/community.general/pull/9959).'
- - proxmox and proxmox_kvm modules - allow uppercase characters in VM/container
- tags (https://github.com/ansible-collections/community.general/issues/9895,
- https://github.com/ansible-collections/community.general/pull/10024).
- - puppet - improve parameter formatting, no impact to user (https://github.com/ansible-collections/community.general/pull/10014).
- - redfish module utils - add ``REDFISH_COMMON_ARGUMENT_SPEC``, a corresponding
- ``redfish`` docs fragment, and support for its ``validate_certs``, ``ca_path``,
- and ``ciphers`` options (https://github.com/ansible-collections/community.general/issues/3686,
- https://github.com/ansible-collections/community.general/pull/9964).
- - redfish_command, redfish_config, redfish_info - add ``validate_certs`` and
- ``ca_path`` options to configure TLS/SSL (https://github.com/ansible-collections/community.general/issues/3686,
- https://github.com/ansible-collections/community.general/pull/9964).
- - rocketchat - fix duplicate JSON conversion for Rocket.Chat < 7.4.0 (https://github.com/ansible-collections/community.general/pull/9965).
- - wdc_redfish_command, wdc_redfish_info - add ``validate_certs``, ``ca_path``,
- and ``ciphers`` options to configure TLS/SSL (https://github.com/ansible-collections/community.general/issues/3686,
- https://github.com/ansible-collections/community.general/pull/9964).
- - xcc_redfish_command - add ``validate_certs``, ``ca_path``, and ``ciphers``
- options to configure TLS/SSL (https://github.com/ansible-collections/community.general/issues/3686,
- https://github.com/ansible-collections/community.general/pull/9964).
- - zypper - adds ``skip_post_errors`` that allows to skip RPM post-install
- errors (Zypper return code 107) (https://github.com/ansible-collections/community.general/issues/9972).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 10.6.0.yml
- - 10005-fix-method-exists-in-sysrc.yml
- - 10011-github_deploy_key-check-key-present.yml
- - 10012-improve-error-handling-homebrew-missing-tap.yml
- - 10014-puppet-improve-param.yml
- - 10026-stackpath-compute-deprecation.yml
- - 10028-manifold-deprecation.yml
- - 9646-hpilo-fix-idempotency.yml
- - 9833-data-tagging.yml
- - 9895-proxmox_tags_with_uppercase_chars.yml
- - 9898-keycloak_user-supports-subgroups.yaml
- - 9951-mod-php-identifier.yml
- - 9952-proxmox-inventory-plugin-improve-ansible_host.yml
- - 9953-gitlab-project-members-support-owner-level.yml
- - 9959-update-opennebula-onevm-updateconf-params.yml
- - 9960-gitlab_project-add-build_timeout-option.yml
- - 9962-nmcli-add-infiniband-mac-support.yml
- - 9963-proxmox_disk-storage.yml
- - 9964-redfish-tls.yml
- - 9965-fix-duplicate-jsonify-payload-for-rocketchat-pre740.yml
- - 9970-pkcs12_alias_cert_alias_optional.yml
- - 9972-zypper-skip-post-errors.yml
- - 9976-keycloak_client-fix-idempotency-regression.yml
- - 9987-keycloak-auth-flow-fix-config.yaml
- - 9992-filtertypeerror.yml
- plugins:
- connection:
- - description: Run tasks in WSL distribution using wsl.exe CLI via SSH.
- name: wsl
- namespace: null
- release_date: '2025-04-21'
+ancestor: 11.0.0
+releases: {}
diff --git a/changelogs/config.yaml b/changelogs/config.yaml
index 32ffe27f2b..578b8c3765 100644
--- a/changelogs/config.yaml
+++ b/changelogs/config.yaml
@@ -7,9 +7,9 @@ changelog_filename_template: ../CHANGELOG.rst
changelog_filename_version_depth: 0
changes_file: changelog.yaml
changes_format: combined
+ignore_other_fragment_extensions: true
keep_fragments: false
mention_ancestor: true
-flatmap: true
new_plugins_after_name: removed_features
notesdir: fragments
output_formats:
@@ -40,3 +40,4 @@ use_fqcn: true
add_plugin_period: true
changelog_nice_yaml: true
changelog_sort: version
+vcs: auto
diff --git a/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml
new file mode 100644
index 0000000000..d1cfee7816
--- /dev/null
+++ b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml
@@ -0,0 +1,7 @@
+deprecated_features:
+ - pacemaker_cluster - the parameter ``state`` will become a required parameter in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/10227).
+
+minor_changes:
+ - pacemaker_cluster - add ``state=maintenance`` for managing pacemaker maintenance mode (https://github.com/ansible-collections/community.general/issues/10200, https://github.com/ansible-collections/community.general/pull/10227).
+ - pacemaker_cluster - rename ``node`` to ``name`` and add ``node`` alias (https://github.com/ansible-collections/community.general/pull/10227).
+ - pacemaker_resource - enhance module by removing duplicative code (https://github.com/ansible-collections/community.general/pull/10227).
diff --git a/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml
new file mode 100644
index 0000000000..eec12e8669
--- /dev/null
+++ b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - keycloak - add support for ``grant_type=client_credentials`` to all keycloak modules, so that specifying ``auth_client_id`` and ``auth_client_secret`` is sufficient for authentication (https://github.com/ansible-collections/community.general/pull/10231).
diff --git a/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml
new file mode 100644
index 0000000000..29d71ca393
--- /dev/null
+++ b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - cloudflare_dns - adds support for PTR records (https://github.com/ansible-collections/community.general/pull/10267).
diff --git a/changelogs/fragments/10269-cloudflare-dns-refactor.yml b/changelogs/fragments/10269-cloudflare-dns-refactor.yml
new file mode 100644
index 0000000000..9f91040d63
--- /dev/null
+++ b/changelogs/fragments/10269-cloudflare-dns-refactor.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - cloudflare_dns - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10269).
diff --git a/changelogs/fragments/10271--disable_lookups.yml b/changelogs/fragments/10271--disable_lookups.yml
new file mode 100644
index 0000000000..d28e2ac833
--- /dev/null
+++ b/changelogs/fragments/10271--disable_lookups.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - "icinga2 inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)."
+ - "linode inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)."
diff --git a/changelogs/fragments/10285-fstr-plugins.yml b/changelogs/fragments/10285-fstr-plugins.yml
new file mode 100644
index 0000000000..6fff590fee
--- /dev/null
+++ b/changelogs/fragments/10285-fstr-plugins.yml
@@ -0,0 +1,7 @@
+minor_changes:
+ - dense callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - mail callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - wsl connection plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - jc filter plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - iocage inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
+ - xen_orchestra inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285).
diff --git a/changelogs/fragments/10311-xfconf-refactor.yml b/changelogs/fragments/10311-xfconf-refactor.yml
new file mode 100644
index 0000000000..9d71bd17d8
--- /dev/null
+++ b/changelogs/fragments/10311-xfconf-refactor.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - xfconf - minor adjustments to the code (https://github.com/ansible-collections/community.general/pull/10311).
diff --git a/changelogs/fragments/10323-nmcli-improvements.yml b/changelogs/fragments/10323-nmcli-improvements.yml
new file mode 100644
index 0000000000..53436ea7d6
--- /dev/null
+++ b/changelogs/fragments/10323-nmcli-improvements.yml
@@ -0,0 +1,2 @@
+minor_changes:
+ - nmcli - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10323).
diff --git a/changelogs/fragments/10328-redundant-brackets.yml b/changelogs/fragments/10328-redundant-brackets.yml
new file mode 100644
index 0000000000..f8f74a336c
--- /dev/null
+++ b/changelogs/fragments/10328-redundant-brackets.yml
@@ -0,0 +1,32 @@
+minor_changes:
+ - logstash callback plugin - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - keycloak module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - python_runner module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - cloudflare_dns - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - crypttab - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - datadog_monitor - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_deploy_key - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_group_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_hook - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_project_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - gitlab_runner - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - ipa_group - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - jenkins_build - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - jenkins_build_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - nmcli - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - oneandone_firewall_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - oneandone_load_balancer - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - oneandone_monitoring_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - onepassword_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - osx_defaults - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - ovh_ip_loadbalancing_backend - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - packet_device - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - pagerduty - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - pingdom - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - rhevm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - rocketchat - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - sensu_silence - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - sl_vm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - urpmi - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - xattr - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
+ - xml - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328).
diff --git a/changelogs/fragments/10329-catapult-deprecation.yml b/changelogs/fragments/10329-catapult-deprecation.yml
new file mode 100644
index 0000000000..5e5209edda
--- /dev/null
+++ b/changelogs/fragments/10329-catapult-deprecation.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+ - catapult - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10329).
diff --git a/changelogs/fragments/10339-github_app_access_token.yml b/changelogs/fragments/10339-github_app_access_token.yml
new file mode 100644
index 0000000000..00cd71f559
--- /dev/null
+++ b/changelogs/fragments/10339-github_app_access_token.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - github_release - support multiple types of GitHub tokens; no longer failing when ``ghs_`` token type is provided (https://github.com/ansible-collections/community.general/issues/10338, https://github.com/ansible-collections/community.general/pull/10339).
\ No newline at end of file
diff --git a/changelogs/fragments/10349-incus_connection-error-handling.yml b/changelogs/fragments/10349-incus_connection-error-handling.yml
new file mode 100644
index 0000000000..b35da354d2
--- /dev/null
+++ b/changelogs/fragments/10349-incus_connection-error-handling.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - incus connection plugin - fix error handling to return more useful Ansible errors to the user (https://github.com/ansible-collections/community.general/issues/10344, https://github.com/ansible-collections/community.general/pull/10349).
diff --git a/changelogs/fragments/10359-dependent.yml b/changelogs/fragments/10359-dependent.yml
new file mode 100644
index 0000000000..e48a6142e8
--- /dev/null
+++ b/changelogs/fragments/10359-dependent.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "dependent lookup plugin - avoid deprecated ansible-core 2.19 functionality (https://github.com/ansible-collections/community.general/pull/10359)."
diff --git a/changelogs/fragments/10417-sysrc-refactor.yml b/changelogs/fragments/10417-sysrc-refactor.yml
new file mode 100644
index 0000000000..b1b5db632b
--- /dev/null
+++ b/changelogs/fragments/10417-sysrc-refactor.yml
@@ -0,0 +1,4 @@
+minor_changes:
+ - sysrc - adjustments to the code (https://github.com/ansible-collections/community.general/pull/10417).
+bugfixes:
+ - sysrc - fixes parsing with multi-line variables (https://github.com/ansible-collections/community.general/issues/10394, https://github.com/ansible-collections/community.general/pull/10417).
\ No newline at end of file
diff --git a/changelogs/fragments/10442-apk-fix-empty-names.yml b/changelogs/fragments/10442-apk-fix-empty-names.yml
new file mode 100644
index 0000000000..24d68b52df
--- /dev/null
+++ b/changelogs/fragments/10442-apk-fix-empty-names.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - apk - handle empty name strings properly
+ (https://github.com/ansible-collections/community.general/issues/10441, https://github.com/ansible-collections/community.general/pull/10442).
\ No newline at end of file
diff --git a/changelogs/fragments/10445-cronvar-reject-empty-values.yml b/changelogs/fragments/10445-cronvar-reject-empty-values.yml
new file mode 100644
index 0000000000..1bf39619cc
--- /dev/null
+++ b/changelogs/fragments/10445-cronvar-reject-empty-values.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "cronvar - handle empty strings on ``value`` properly (https://github.com/ansible-collections/community.general/issues/10439, https://github.com/ansible-collections/community.general/pull/10445)."
diff --git a/changelogs/fragments/10455-capabilities-improve-error-detection.yml b/changelogs/fragments/10455-capabilities-improve-error-detection.yml
new file mode 100644
index 0000000000..40337a424b
--- /dev/null
+++ b/changelogs/fragments/10455-capabilities-improve-error-detection.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - capabilities - using invalid path (symlink/directory/...) returned unrelated and incoherent error messages (https://github.com/ansible-collections/community.general/issues/5649, https://github.com/ansible-collections/community.general/pull/10455).
diff --git a/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml
new file mode 100644
index 0000000000..70af0932b3
--- /dev/null
+++ b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "listen_port_facts - avoid crash when required commands are missing (https://github.com/ansible-collections/community.general/issues/10457, https://github.com/ansible-collections/community.general/pull/10458)."
diff --git a/changelogs/fragments/10459-deprecations.yml b/changelogs/fragments/10459-deprecations.yml
new file mode 100644
index 0000000000..4b3f317454
--- /dev/null
+++ b/changelogs/fragments/10459-deprecations.yml
@@ -0,0 +1,6 @@
+bugfixes:
+ - "apache2_module - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "htpasswd - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "syspatch - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "sysupgrade - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
+ - "zypper_repository - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)."
diff --git a/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml
new file mode 100644
index 0000000000..c4b77299f5
--- /dev/null
+++ b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "cronvar - fix crash on missing ``cron_file`` parent directories (https://github.com/ansible-collections/community.general/issues/10460, https://github.com/ansible-collections/community.general/pull/10461)."
diff --git a/changelogs/fragments/10491-irc.yml b/changelogs/fragments/10491-irc.yml
new file mode 100644
index 0000000000..74867e71a7
--- /dev/null
+++ b/changelogs/fragments/10491-irc.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "irc - pass hostname to ``wrap_socket()`` if ``use_tls=true`` and ``validate_certs=true`` (https://github.com/ansible-collections/community.general/issues/10472, https://github.com/ansible-collections/community.general/pull/10491)."
diff --git a/changelogs/fragments/10494-rfdn-1.yml b/changelogs/fragments/10494-rfdn-1.yml
new file mode 100644
index 0000000000..09a0c442b0
--- /dev/null
+++ b/changelogs/fragments/10494-rfdn-1.yml
@@ -0,0 +1,27 @@
+minor_changes:
+ - aerospike_migrations - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - airbrake_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bigpanda - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bootc_manage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bower - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - btrfs_subvolume - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - bundler - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - campfire - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - cargo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - catapult - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - cisco_webex - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - consul_kv - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - consul_policy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - copr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - datadog_downtime - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - datadog_monitor - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dconf - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dimensiondata_network - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dimensiondata_vlan - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dnf_config_manager - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dnsmadeeasy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - dpkg_divert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - easy_install - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - elasticsearch_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - facter - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
+ - filesystem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494).
diff --git a/changelogs/fragments/10505-rfdn-2.yml b/changelogs/fragments/10505-rfdn-2.yml
new file mode 100644
index 0000000000..89aeab9356
--- /dev/null
+++ b/changelogs/fragments/10505-rfdn-2.yml
@@ -0,0 +1,39 @@
+minor_changes:
+ - gem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - git_config_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_deploy_key - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_repo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_webhook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - github_webhook_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_branch - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_group_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_group_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_hook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_instance_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_label - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_merge_request - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_milestone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_project - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_project_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - gitlab_project_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - grove - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - hg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - homebrew - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - homebrew_cask - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - homebrew_tap - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - honeybadger_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - htpasswd - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - icinga2_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - influxdb_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ini_file - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipa_dnsrecord - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipa_dnszone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipa_service - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipbase_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - ipwcli_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - irc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jabber - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jenkins_credential - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jenkins_job - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
+ - jenkins_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505).
diff --git a/changelogs/fragments/10507-rfdn-3.yml b/changelogs/fragments/10507-rfdn-3.yml
new file mode 100644
index 0000000000..fae9d118bc
--- /dev/null
+++ b/changelogs/fragments/10507-rfdn-3.yml
@@ -0,0 +1,35 @@
+minor_changes:
+ - keycloak_authz_authorization_scope - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keycloak_authz_permission - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keycloak_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keycloak_userprofile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - keyring - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - kibana_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - layman - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - ldap_attrs - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - ldap_inc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - librato_annotation - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - lldp - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - logentries - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - lxca_cmms - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - lxca_nodes - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - macports - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mail - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_alerts - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_policies - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_policies_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_tags - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - manageiq_tenant - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - matrix - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mattermost - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - maven_artifact - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - memset_dns_reload - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - memset_zone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - memset_zone_record - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mqtt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mssql_db - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - mssql_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - netcup_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - newrelic_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
+ - nsupdate - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507).
diff --git a/changelogs/fragments/10512-rfdn-4.yml b/changelogs/fragments/10512-rfdn-4.yml
new file mode 100644
index 0000000000..6d8f9e7d77
--- /dev/null
+++ b/changelogs/fragments/10512-rfdn-4.yml
@@ -0,0 +1,42 @@
+minor_changes:
+ - oci_vcn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - one_image_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - one_template - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - one_vnet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - onepassword_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - oneview_fc_network_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - opendj_backendprop - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - ovh_monthly_billing - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pagerduty - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pagerduty_change - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pagerduty_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pam_limits - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pear - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pkgng - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pnpm - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - portage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_org - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_org_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pritunl_user_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pubnub_blocks - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pushbullet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - pushover - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - redis_data - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - redis_data_incr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - riak - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - rocketchat - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - rollbar_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - say - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - scaleway_database_backup - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sendgrid - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sensu_silence - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sorcery - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - ssh_config - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - statusio_maintenance - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - svr4pkg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - swdepot - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - syslogger - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - sysrc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - systemd_creds_decrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
+ - systemd_creds_encrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512).
diff --git a/changelogs/fragments/10513-rfdn-5.yml b/changelogs/fragments/10513-rfdn-5.yml
new file mode 100644
index 0000000000..d930d7345c
--- /dev/null
+++ b/changelogs/fragments/10513-rfdn-5.yml
@@ -0,0 +1,18 @@
+minor_changes:
+ - taiga_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - twilio - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_aaa_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_ca_host_key_cert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_dns_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_network_interface_address - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_auth_profile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_exception - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_frontend - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - utm_proxy_location - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - vertica_configuration - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - vertica_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - vertica_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - xbps - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - yarn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - zypper - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
+ - zypper_repository - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513).
diff --git a/changelogs/fragments/10531-wsl-paramiko.yml b/changelogs/fragments/10531-wsl-paramiko.yml
new file mode 100644
index 0000000000..08257d6c78
--- /dev/null
+++ b/changelogs/fragments/10531-wsl-paramiko.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - "wsl connection plugin - avoid deprecated ansible-core paramiko import helper, import paramiko directly instead
+ (https://github.com/ansible-collections/community.general/issues/10515, https://github.com/ansible-collections/community.general/pull/10531)."
diff --git a/changelogs/fragments/10532-apk.yml b/changelogs/fragments/10532-apk.yml
new file mode 100644
index 0000000000..84c5d985e8
--- /dev/null
+++ b/changelogs/fragments/10532-apk.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "apk - fix check for empty/whitespace-only package names (https://github.com/ansible-collections/community.general/pull/10532)."
diff --git a/changelogs/fragments/10539-json_query.yml b/changelogs/fragments/10539-json_query.yml
new file mode 100644
index 0000000000..7e84b7ecb0
--- /dev/null
+++ b/changelogs/fragments/10539-json_query.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "json_query filter plugin - make compatible with lazy evaluation list and dictionary types of ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10539)."
diff --git a/changelogs/fragments/10566-merge_variables.yml b/changelogs/fragments/10566-merge_variables.yml
new file mode 100644
index 0000000000..c0de6dd845
--- /dev/null
+++ b/changelogs/fragments/10566-merge_variables.yml
@@ -0,0 +1,2 @@
+bugfixes:
+ - "merge_variables lookup plugin - avoid deprecated functionality from ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10566)."
diff --git a/changelogs/fragments/9499-typetalk-deprecation.yml b/changelogs/fragments/9499-typetalk-deprecation.yml
new file mode 100644
index 0000000000..8323bbe959
--- /dev/null
+++ b/changelogs/fragments/9499-typetalk-deprecation.yml
@@ -0,0 +1,2 @@
+deprecated_features:
+ - typetalk - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9499).
diff --git a/changelogs/fragments/become-pipelining.yml b/changelogs/fragments/become-pipelining.yml
new file mode 100644
index 0000000000..201d85f71c
--- /dev/null
+++ b/changelogs/fragments/become-pipelining.yml
@@ -0,0 +1,3 @@
+bugfixes:
+ - "doas become plugin - disable pipelining on ansible-core 2.19+. The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/issues/9977, https://github.com/ansible-collections/community.general/pull/10537)."
+ - "machinectl become plugin - disable pipelining on ansible-core 2.19+. The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/pull/10537)."
diff --git a/changelogs/fragments/logstash.yml b/changelogs/fragments/logstash.yml
new file mode 100644
index 0000000000..1c7ec89b7d
--- /dev/null
+++ b/changelogs/fragments/logstash.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - logstash callback plugin - remove reference to Python 2 library (https://github.com/ansible-collections/community.general/pull/10345).
diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml
index 156e93309d..4594ab4c2d 100644
--- a/docs/docsite/extra-docs.yml
+++ b/docs/docsite/extra-docs.yml
@@ -8,9 +8,10 @@ sections:
toctree:
- filter_guide
- test_guide
- - title: Cloud Guides
+ - title: Technology Guides
toctree:
- guide_alicloud
+ - guide_iocage
- guide_online
- guide_packet
- guide_scaleway
diff --git a/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2 b/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2
index 77281549ba..64ac1ff0c2 100644
--- a/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2
+++ b/docs/docsite/helper/keep_keys/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst.j2
@@ -36,7 +36,7 @@ gives
result:
{{ tests.0.result | to_yaml(indent=2) | indent(5) }}
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-5 are all the same:
diff --git a/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2 b/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2
index 62b25c344c..6c201d5b4e 100644
--- a/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2
+++ b/docs/docsite/helper/remove_keys/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst.j2
@@ -36,7 +36,7 @@ gives
result:
{{ tests.0.result | to_yaml(indent=2) | indent(5) }}
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-5 are all the same:
diff --git a/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2 b/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2
index fb0af32f2f..0c0ba8f0be 100644
--- a/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2
+++ b/docs/docsite/helper/replace_keys/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst.j2
@@ -37,7 +37,7 @@ gives
result:
{{ tests.0.result | to_yaml(indent=2) | indent(5) }}
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-3 are all the same:
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst
index 488cb2ce7d..3549d29ba7 100644
--- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst
@@ -44,7 +44,7 @@ gives
- {k0_x0: A0, k1_x1: B0}
- {k0_x0: A1, k1_x1: B1}
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-5 are all the same:
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst
index 03d4710f3a..4ac87ab79c 100644
--- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst
@@ -46,7 +46,7 @@ gives
- k2_x2: [C1]
k3_x3: bar
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-5 are all the same:
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst
index ba1bcad502..d0eb202bfe 100644
--- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst
@@ -53,7 +53,7 @@ gives
k2_x2: [C1]
k3_x3: bar
-
+
.. versionadded:: 9.1.0
* The results of the below examples 1-3 are all the same:
diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst
index 42737c44b7..64a82536d8 100644
--- a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst
+++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst
@@ -4,7 +4,7 @@
SPDX-License-Identifier: GPL-3.0-or-later
.. _ansible_collections.community.general.docsite.filter_guide.filter_guide_abstract_informations.lists_of_dicts:
-
+
Lists of dictionaries
^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/docsite/rst/filter_guide.rst b/docs/docsite/rst/filter_guide.rst
index 1c6468ddec..da8a90af3c 100644
--- a/docs/docsite/rst/filter_guide.rst
+++ b/docs/docsite/rst/filter_guide.rst
@@ -8,7 +8,7 @@
community.general Filter Guide
==============================
-The :ref:`community.general collection ` offers several useful filter plugins.
+The :anscollection:`community.general collection <community.general#collection>` offers several useful filter plugins.
.. toctree::
:maxdepth: 2
diff --git a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst
index 3059b00321..e5b5bb7e36 100644
--- a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst
+++ b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst
@@ -26,8 +26,8 @@ You can use the :ansplugin:`community.general.dict_kv filter `_
+* `man iocage <https://man.freebsd.org/cgi/man.cgi?query=iocage>`_
+* `Jails and Containers <https://docs.freebsd.org/en/books/handbook/jails/>`_
+
+.. note::
+ The output of the examples is YAML formatted. See the option :ansopt:`ansible.builtin.default#callback:result_format`.
+
+.. toctree::
+ :caption: Table of Contents
+ :maxdepth: 1
+
+ guide_iocage_inventory_basics
+ guide_iocage_inventory_dhcp
+ guide_iocage_inventory_hooks
+ guide_iocage_inventory_properties
+ guide_iocage_inventory_tags
+ guide_iocage_inventory_aliases
diff --git a/docs/docsite/rst/guide_iocage_inventory_aliases.rst b/docs/docsite/rst/guide_iocage_inventory_aliases.rst
new file mode 100644
index 0000000000..431403d733
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_aliases.rst
@@ -0,0 +1,200 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_aliases:
+
+Aliases
+-------
+
+Quoting :ref:`inventory_aliases`:
+
+ The ``inventory_hostname`` is the unique identifier for a host in Ansible, this can be an IP or a hostname, but also just an 'alias' or short name for the host.
+
+As root at the iocage host, stop and destroy all jails:
+
+.. code-block:: console
+
+ shell> iocage stop ALL
+ * Stopping srv_1
+ + Executing prestop OK
+ + Stopping services OK
+ + Tearing down VNET OK
+ + Removing devfs_ruleset: 1000 OK
+ + Removing jail process OK
+ + Executing poststop OK
+ * Stopping srv_2
+ + Executing prestop OK
+ + Stopping services OK
+ + Tearing down VNET OK
+ + Removing devfs_ruleset: 1001 OK
+ + Removing jail process OK
+ + Executing poststop OK
+ * Stopping srv_3
+ + Executing prestop OK
+ + Stopping services OK
+ + Tearing down VNET OK
+ + Removing devfs_ruleset: 1002 OK
+ + Removing jail process OK
+ + Executing poststop OK
+ ansible_client is not running!
+
+ shell> iocage destroy -f srv_1 srv_2 srv_3
+ Destroying srv_1
+ Destroying srv_2
+ Destroying srv_3
+
+Create three VNET jails with a DHCP interface from the template *ansible_client*. Use the option ``--count``:
+
+.. code-block:: console
+
+ shell> iocage create --short --template ansible_client --count 3 bpf=1 dhcp=1 vnet=1
+ 1c11de2d successfully created!
+ 9d94cc9e successfully created!
+ 052b9557 successfully created!
+
+The names are random. Start the jails:
+
+.. code-block:: console
+
+ shell> iocage start ALL
+ No default gateway found for ipv6.
+ * Starting 052b9557
+ + Started OK
+ + Using devfs_ruleset: 1000 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.137/24
+ No default gateway found for ipv6.
+ * Starting 1c11de2d
+ + Started OK
+ + Using devfs_ruleset: 1001 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.146/24
+ No default gateway found for ipv6.
+ * Starting 9d94cc9e
+ + Started OK
+ + Using devfs_ruleset: 1002 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.115/24
+ Please convert back to a jail before trying to start ansible_client
+
+List the jails:
+
+.. code-block:: console
+
+ shell> iocage list -l
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+==========+======+=======+======+=================+====================+=====+================+==========+
+ | 207 | 052b9557 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.137 | - | ansible_client | no |
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 208 | 1c11de2d | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.146 | - | ansible_client | no |
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 209 | 9d94cc9e | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.115 | - | ansible_client | no |
+ +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+Set notes. The tag *alias* will be used to create inventory aliases:
+
+.. code-block:: console
+
+ shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_1" 052b9557
+ notes: none -> vmm=iocage_02 project=foo alias=srv_1
+ shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_2" 1c11de2d
+ notes: none -> vmm=iocage_02 project=foo alias=srv_2
+ shell> iocage set notes="vmm=iocage_02 project=bar alias=srv_3" 9d94cc9e
+ notes: none -> vmm=iocage_02 project=bar alias=srv_3
+
+Update the inventory configuration. Set the option
+:ansopt:`community.general.iocage#inventory:inventory_hostname_tag` to :ansval:`alias`. This tag keeps the
+value of the alias. The option :ansopt:`community.general.iocage#inventory:get_properties` must be
+enabled. For example, ``hosts/02_iocage.yml`` contains:
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ get_properties: true
+ inventory_hostname_tag: alias
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+ iocage_tags: dict(iocage_properties.notes | split | map('split', '='))
+ keyed_groups:
+ - prefix: vmm
+ key: iocage_tags.vmm
+ - prefix: project
+ key: iocage_tags.project
+
+Display tags and groups. Create a playbook ``pb-test-groups.yml`` with the following content:
+
+.. code-block:: yaml+jinja
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - debug:
+ var: iocage_tags
+
+ - debug:
+ msg: |
+ {% for group in groups %}
+ {{ group }}: {{ groups[group] }}
+ {% endfor %}
+ run_once: true
+
+Run the playbook:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ iocage_tags:
+ alias: srv_1
+ project: foo
+ vmm: iocage_02
+ ok: [srv_2] =>
+ iocage_tags:
+ alias: srv_2
+ project: foo
+ vmm: iocage_02
+ ok: [srv_3] =>
+ iocage_tags:
+ alias: srv_3
+ project: bar
+ vmm: iocage_02
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ msg: |-
+ all: ['srv_1', 'srv_2', 'srv_3']
+ ungrouped: []
+ vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3']
+ project_foo: ['srv_1', 'srv_2']
+ project_bar: ['srv_3']
+
+ PLAY RECAP **********************************************************************************************************
+ srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
diff --git a/docs/docsite/rst/guide_iocage_inventory_basics.rst b/docs/docsite/rst/guide_iocage_inventory_basics.rst
new file mode 100644
index 0000000000..f198edc4f4
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_basics.rst
@@ -0,0 +1,128 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_basics:
+
+Basics
+------
+
+As root at the iocage host, create three VNET jails with a DHCP interface from the template
+*ansible_client*:
+
+.. code-block:: console
+
+ shell> iocage create --template ansible_client --name srv_1 bpf=1 dhcp=1 vnet=1
+ srv_1 successfully created!
+ shell> iocage create --template ansible_client --name srv_2 bpf=1 dhcp=1 vnet=1
+ srv_2 successfully created!
+ shell> iocage create --template ansible_client --name srv_3 bpf=1 dhcp=1 vnet=1
+ srv_3 successfully created!
+
+See: `Configuring a VNET Jail <https://docs.freebsd.org/en/books/handbook/jails/>`_.
+
+As admin at the controller, list the jails:
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 iocage list -l
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +======+=======+======+=======+======+=================+====================+=====+================+==========+
+ | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+Create the inventory file ``hosts/02_iocage.yml``
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+
+Display the inventory:
+
+.. code-block:: console
+
+ shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml
+ all:
+ children:
+ ungrouped:
+ hosts:
+ srv_1:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (not running)
+ iocage_ip6: '-'
+ iocage_jid: None
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: down
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_2:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (not running)
+ iocage_ip6: '-'
+ iocage_jid: None
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: down
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_3:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (not running)
+ iocage_ip6: '-'
+ iocage_jid: None
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: down
+ iocage_template: ansible_client
+ iocage_type: jail
+
+Optionally, create shared IP jails:
+
+.. code-block:: console
+
+ shell> iocage create --template ansible_client --name srv_1 ip4_addr="em0|10.1.0.101/24"
+ srv_1 successfully created!
+ shell> iocage create --template ansible_client --name srv_2 ip4_addr="em0|10.1.0.102/24"
+ srv_2 successfully created!
+ shell> iocage create --template ansible_client --name srv_3 ip4_addr="em0|10.1.0.103/24"
+ srv_3 successfully created!
+ shell> iocage list -l
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +======+=======+======+=======+======+=================+===================+=====+================+==========+
+ | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.101/24 | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+ | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.102/24 | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+ | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.103/24 | - | ansible_client | no |
+ +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+
+
+See: `Configuring a Shared IP Jail <https://docs.freebsd.org/en/books/handbook/jails/>`_
+
+If iocage needs environment variable(s), use the option :ansopt:`community.general.iocage#inventory:env`. For example,
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
diff --git a/docs/docsite/rst/guide_iocage_inventory_dhcp.rst b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst
new file mode 100644
index 0000000000..3c37366ca6
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst
@@ -0,0 +1,175 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_dhcp:
+
+DHCP
+----
+
+As root at the iocage host, start the jails:
+
+.. code-block:: console
+
+ shell> iocage start ALL
+ No default gateway found for ipv6.
+ * Starting srv_1
+ + Started OK
+ + Using devfs_ruleset: 1000 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.183/24
+ No default gateway found for ipv6.
+ * Starting srv_2
+ + Started OK
+ + Using devfs_ruleset: 1001 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.204/24
+ No default gateway found for ipv6.
+ * Starting srv_3
+ + Started OK
+ + Using devfs_ruleset: 1002 (iocage generated default)
+ + Configuring VNET OK
+ + Using IP options: vnet
+ + Starting services OK
+ + Executing poststart OK
+ + DHCP Address: 10.1.0.169/24
+ Please convert back to a jail before trying to start ansible_client
+
+List the jails:
+
+.. code-block:: console
+
+ shell> iocage list -l
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+=======+======+=======+======+=================+====================+=====+================+==========+
+ | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+As admin at the controller, list the jails. The IP4 column says "... address requires root":
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 iocage list -l
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+=======+======+=======+======+=================+=========================================+=====+================+==========+
+ | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+ | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+ | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+
+
+Use sudo if enabled:
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 sudo iocage list -l
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL |
+ +=====+=======+======+=======+======+=================+====================+=====+================+==========+
+ | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+ | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no |
+ +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+
+
+Create the inventory file ``hosts/02_iocage.yml``. Use the option
+:ansopt:`community.general.iocage#inventory:sudo`:
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ sudo: true
+
+Display the inventory:
+
+.. code-block:: console
+
+ shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml
+ all:
+ children:
+ ungrouped:
+ hosts:
+ srv_1:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: 10.1.0.183
+ iocage_ip4_dict:
+ ip4:
+ - ifc: epair0b
+ ip: 10.1.0.183
+ mask: '-'
+ msg: ''
+ iocage_ip6: '-'
+ iocage_jid: '204'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_2:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: 10.1.0.204
+ iocage_ip4_dict:
+ ip4:
+ - ifc: epair0b
+ ip: 10.1.0.204
+ mask: '-'
+ msg: ''
+ iocage_ip6: '-'
+ iocage_jid: '205'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_3:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_ip4: 10.1.0.169
+ iocage_ip4_dict:
+ ip4:
+ - ifc: epair0b
+ ip: 10.1.0.169
+ mask: '-'
+ msg: ''
+ iocage_ip6: '-'
+ iocage_jid: '206'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+
+Note: If the option :ansopt:`community.general.iocage#inventory:env` is used and :ansopt:`community.general.iocage#inventory:sudo` is enabled, enable also :ansopt:`community.general.iocage#inventory:sudo_preserve_env`. For example,
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ env:
+ CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1
+ sudo: true
+ sudo_preserve_env: true
+
+In this case, make sure the sudo tag ``SETENV`` is used:
+
+.. code-block:: console
+
+ shell> ssh admin@10.1.0.73 sudo cat /usr/local/etc/sudoers | grep admin
+ admin ALL=(ALL) NOPASSWD:SETENV: ALL
diff --git a/docs/docsite/rst/guide_iocage_inventory_hooks.rst b/docs/docsite/rst/guide_iocage_inventory_hooks.rst
new file mode 100644
index 0000000000..45364fc798
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_hooks.rst
@@ -0,0 +1,187 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_hooks:
+
+Hooks
+-----
+
+The iocage utility internally opens a console to a jail to get the jail's DHCP address. This
+requires root. If you run the command ``iocage list -l`` as unprivileged user, you'll see the
+message ``DHCP (running -- address requires root)``. If you are not granted the root privilege, use
+``/etc/dhclient-exit-hooks``. For example, in the jail *srv_1*, create the file
+``/zroot/iocage/jails/srv_1/root/etc/dhclient-exit-hooks``
+
+.. code-block:: shell
+
+ case "$reason" in
+ "BOUND"|"REBIND"|"REBOOT"|"RENEW")
+ echo $new_ip_address > /var/db/dhclient-hook.address.$interface
+ ;;
+ esac
+
+where ``/zroot/iocage`` is the activated pool.
+
+.. code-block:: console
+
+ shell> zfs list | grep /zroot/iocage
+ zroot/iocage 4.69G 446G 5.08M /zroot/iocage
+ zroot/iocage/download 927M 446G 384K /zroot/iocage/download
+ zroot/iocage/download/14.1-RELEASE 465M 446G 465M /zroot/iocage/download/14.1-RELEASE
+ zroot/iocage/download/14.2-RELEASE 462M 446G 462M /zroot/iocage/download/14.2-RELEASE
+ zroot/iocage/images 384K 446G 384K /zroot/iocage/images
+ zroot/iocage/jails 189M 446G 480K /zroot/iocage/jails
+ zroot/iocage/jails/srv_1 62.9M 446G 464K /zroot/iocage/jails/srv_1
+ zroot/iocage/jails/srv_1/root 62.4M 446G 3.53G /zroot/iocage/jails/srv_1/root
+ zroot/iocage/jails/srv_2 62.8M 446G 464K /zroot/iocage/jails/srv_2
+ zroot/iocage/jails/srv_2/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_2/root
+ zroot/iocage/jails/srv_3 62.8M 446G 464K /zroot/iocage/jails/srv_3
+ zroot/iocage/jails/srv_3/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_3/root
+ zroot/iocage/log 688K 446G 688K /zroot/iocage/log
+ zroot/iocage/releases 2.93G 446G 384K /zroot/iocage/releases
+ zroot/iocage/releases/14.2-RELEASE 2.93G 446G 384K /zroot/iocage/releases/14.2-RELEASE
+ zroot/iocage/releases/14.2-RELEASE/root 2.93G 446G 2.88G /zroot/iocage/releases/14.2-RELEASE/root
+ zroot/iocage/templates 682M 446G 416K /zroot/iocage/templates
+ zroot/iocage/templates/ansible_client 681M 446G 432K /zroot/iocage/templates/ansible_client
+ zroot/iocage/templates/ansible_client/root 681M 446G 3.53G /zroot/iocage/templates/ansible_client/root
+
+See: `man dhclient-script <https://man.freebsd.org/cgi/man.cgi?query=dhclient-script>`_
+
+Create the inventory configuration. Use the option :ansopt:`community.general.iocage#inventory:hooks_results` instead of :ansopt:`community.general.iocage#inventory:sudo`:
+
+.. code-block:: console
+
+ shell> cat hosts/02_iocage.yml
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+
+.. note::
+
+ The option :ansopt:`community.general.iocage#inventory:hooks_results` expects the poolname to be mounted to ``/poolname``. For example, if you
+ activate the pool iocage, this plugin expects to find the :ansopt:`community.general.iocage#inventory:hooks_results` items in the path
+ /iocage/iocage/jails/<name>/root. If you mount the poolname to a different path, the easiest
+ remedy is to create a symlink.
+
+As admin at the controller, display the inventory:
+
+.. code-block:: console
+
+ shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml
+ all:
+ children:
+ ungrouped:
+ hosts:
+ srv_1:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_hooks:
+ - 10.1.0.183
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (running -- address requires root)
+ iocage_ip6: '-'
+ iocage_jid: '204'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_2:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_hooks:
+ - 10.1.0.204
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (running -- address requires root)
+ iocage_ip6: '-'
+ iocage_jid: '205'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+ srv_3:
+ iocage_basejail: 'no'
+ iocage_boot: 'off'
+ iocage_hooks:
+ - 10.1.0.169
+ iocage_ip4: '-'
+ iocage_ip4_dict:
+ ip4: []
+ msg: DHCP (running -- address requires root)
+ iocage_ip6: '-'
+ iocage_jid: '206'
+ iocage_release: 14.2-RELEASE-p3
+ iocage_state: up
+ iocage_template: ansible_client
+ iocage_type: jail
+
+Compose the variable ``ansible_host``. For example, ``hosts/02_iocage.yml`` could look like:
+
+.. code-block:: yaml+jinja
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+
+Test the jails. Create a playbook ``pb-test-uname.yml``:
+
+.. code-block:: yaml
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - command: uname -a
+ register: out
+
+ - debug:
+ var: out.stdout
+
+See: :ref:`working_with_bsd`
+
+Run the playbook:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml pb-test-uname.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [command] ******************************************************************************************************
+ changed: [srv_3]
+ changed: [srv_1]
+ changed: [srv_2]
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ out.stdout: FreeBSD srv-1 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+ ok: [srv_3] =>
+ out.stdout: FreeBSD srv-3 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+ ok: [srv_2] =>
+ out.stdout: FreeBSD srv-2 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64
+
+ PLAY RECAP **********************************************************************************************************
+ srv_1 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_2 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_3 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+
+Note: This playbook and the inventory configuration works also for the *Shared IP Jails*.
diff --git a/docs/docsite/rst/guide_iocage_inventory_properties.rst b/docs/docsite/rst/guide_iocage_inventory_properties.rst
new file mode 100644
index 0000000000..d044f2e7f2
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_properties.rst
@@ -0,0 +1,201 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_properties:
+
+Properties
+----------
+
+Optionally, in the inventory file ``hosts/02_iocage.yml``, get the iocage properties. Enable
+:ansopt:`community.general.iocage#inventory:get_properties`:
+
+.. code-block:: yaml+jinja
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ get_properties: true
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+
+Display the properties. Create the playbook ``pb-test-properties.yml``:
+
+.. code-block:: yaml
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - debug:
+ var: iocage_properties
+
+Run the playbook. Limit the inventory to *srv_3*:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml -l srv_3 pb-test-properties.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_3] =>
+ iocage_properties:
+ CONFIG_VERSION: '33'
+ allow_chflags: '0'
+ allow_mlock: '0'
+ allow_mount: '1'
+ allow_mount_devfs: '0'
+ allow_mount_fdescfs: '0'
+ allow_mount_fusefs: '0'
+ allow_mount_linprocfs: '0'
+ allow_mount_linsysfs: '0'
+ allow_mount_nullfs: '0'
+ allow_mount_procfs: '0'
+ allow_mount_tmpfs: '0'
+ allow_mount_zfs: '0'
+ allow_nfsd: '0'
+ allow_quotas: '0'
+ allow_raw_sockets: '0'
+ allow_set_hostname: '1'
+ allow_socket_af: '0'
+ allow_sysvipc: '0'
+ allow_tun: '0'
+ allow_vmm: '0'
+ assign_localhost: '0'
+ available: readonly
+ basejail: '0'
+ boot: '0'
+ bpf: '1'
+ children_max: '0'
+ cloned_release: 14.2-RELEASE
+ comment: none
+ compression: 'on'
+ compressratio: readonly
+ coredumpsize: 'off'
+ count: '1'
+ cpuset: 'off'
+ cputime: 'off'
+ datasize: 'off'
+ dedup: 'off'
+ defaultrouter: auto
+ defaultrouter6: auto
+ depends: none
+ devfs_ruleset: '4'
+ dhcp: '1'
+ enforce_statfs: '2'
+ exec_clean: '1'
+ exec_created: /usr/bin/true
+ exec_fib: '0'
+ exec_jail_user: root
+ exec_poststart: /usr/bin/true
+ exec_poststop: /usr/bin/true
+ exec_prestart: /usr/bin/true
+ exec_prestop: /usr/bin/true
+ exec_start: /bin/sh /etc/rc
+ exec_stop: /bin/sh /etc/rc.shutdown
+ exec_system_jail_user: '0'
+ exec_system_user: root
+ exec_timeout: '60'
+ host_domainname: none
+ host_hostname: srv-3
+ host_hostuuid: srv_3
+ host_time: '1'
+ hostid: ea2ba7d1-4fcd-f13f-82e4-8b32c0a03403
+ hostid_strict_check: '0'
+ interfaces: vnet0:bridge0
+ ip4: new
+ ip4_addr: none
+ ip4_saddrsel: '1'
+ ip6: new
+ ip6_addr: none
+ ip6_saddrsel: '1'
+ ip_hostname: '0'
+ jail_zfs: '0'
+ jail_zfs_dataset: iocage/jails/srv_3/data
+ jail_zfs_mountpoint: none
+ last_started: '2025-06-11 04:29:23'
+ localhost_ip: none
+ login_flags: -f root
+ mac_prefix: 02a098
+ maxproc: 'off'
+ memorylocked: 'off'
+ memoryuse: 'off'
+ min_dyn_devfs_ruleset: '1000'
+ mount_devfs: '1'
+ mount_fdescfs: '1'
+ mount_linprocfs: '0'
+ mount_procfs: '0'
+ mountpoint: readonly
+ msgqqueued: 'off'
+ msgqsize: 'off'
+ nat: '0'
+ nat_backend: ipfw
+ nat_forwards: none
+ nat_interface: none
+ nat_prefix: '172.16'
+ nmsgq: 'off'
+ notes: none
+ nsem: 'off'
+ nsemop: 'off'
+ nshm: 'off'
+ nthr: 'off'
+ openfiles: 'off'
+ origin: readonly
+ owner: root
+ pcpu: 'off'
+ plugin_name: none
+ plugin_repository: none
+ priority: '99'
+ pseudoterminals: 'off'
+ quota: none
+ readbps: 'off'
+ readiops: 'off'
+ release: 14.2-RELEASE-p3
+ reservation: none
+ resolver: /etc/resolv.conf
+ rlimits: 'off'
+ rtsold: '0'
+ securelevel: '2'
+ shmsize: 'off'
+ source_template: ansible_client
+ stacksize: 'off'
+ state: up
+ stop_timeout: '30'
+ swapuse: 'off'
+ sync_state: none
+ sync_target: none
+ sync_tgt_zpool: none
+ sysvmsg: new
+ sysvsem: new
+ sysvshm: new
+ template: '0'
+ type: jail
+ used: readonly
+ vmemoryuse: 'off'
+ vnet: '1'
+ vnet0_mac: 02a0983da05d 02a0983da05e
+ vnet0_mtu: auto
+ vnet1_mac: none
+ vnet1_mtu: auto
+ vnet2_mac: none
+ vnet2_mtu: auto
+ vnet3_mac: none
+ vnet3_mtu: auto
+ vnet_default_interface: auto
+ vnet_default_mtu: '1500'
+ vnet_interfaces: none
+ wallclock: 'off'
+ writebps: 'off'
+ writeiops: 'off'
+
+ PLAY RECAP **********************************************************************************************************
+ srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
diff --git a/docs/docsite/rst/guide_iocage_inventory_tags.rst b/docs/docsite/rst/guide_iocage_inventory_tags.rst
new file mode 100644
index 0000000000..afb645198c
--- /dev/null
+++ b/docs/docsite/rst/guide_iocage_inventory_tags.rst
@@ -0,0 +1,117 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_tags:
+
+Tags
+----
+
+Quoting `man iocage <https://www.freebsd.org/cgi/man.cgi?query=iocage>`_
+
+.. code-block:: text
+
+ PROPERTIES
+ ...
+ notes="any string"
+ Custom notes for miscellaneous tagging.
+ Default: none
+ Source: local
+
+We will use the format ``notes="tag1=value1 tag2=value2 ..."``.
+
+.. note::
+
+ The iocage tags have nothing to do with the :ref:`tags`.
+
+As root at the iocage host, set notes. For example,
+
+.. code-block:: console
+
+ shell> iocage set notes="vmm=iocage_02 project=foo" srv_1
+ notes: none -> vmm=iocage_02 project=foo
+ shell> iocage set notes="vmm=iocage_02 project=foo" srv_2
+ notes: none -> vmm=iocage_02 project=foo
+ shell> iocage set notes="vmm=iocage_02 project=bar" srv_3
+ notes: none -> vmm=iocage_02 project=bar
+
+Update the inventory configuration. Compose a dictionary *iocage_tags* and create groups. The option
+:ansopt:`community.general.iocage#inventory:get_properties` must be enabled.
+For example, ``hosts/02_iocage.yml`` could look like:
+
+.. code-block:: yaml
+
+ plugin: community.general.iocage
+ host: 10.1.0.73
+ user: admin
+ get_properties: true
+ hooks_results:
+ - /var/db/dhclient-hook.address.epair0b
+ compose:
+ ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0)
+ iocage_tags: dict(iocage_properties.notes | split | map('split', '='))
+ keyed_groups:
+ - prefix: vmm
+ key: iocage_tags.vmm
+ - prefix: project
+ key: iocage_tags.project
+
+Display tags and groups. Create a playbook ``pb-test-groups.yml``:
+
+.. code-block:: yaml+jinja
+
+ - hosts: all
+ remote_user: admin
+
+ vars:
+
+ ansible_python_interpreter: auto_silent
+
+ tasks:
+
+ - debug:
+ var: iocage_tags
+
+ - debug:
+ msg: |
+ {% for group in groups %}
+ {{ group }}: {{ groups[group] }}
+ {% endfor %}
+ run_once: true
+
+Run the playbook:
+
+.. code-block:: console
+
+ shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml
+
+ PLAY [all] **********************************************************************************************************
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ iocage_tags:
+ project: foo
+ vmm: iocage_02
+ ok: [srv_2] =>
+ iocage_tags:
+ project: foo
+ vmm: iocage_02
+ ok: [srv_3] =>
+ iocage_tags:
+ project: bar
+ vmm: iocage_02
+
+ TASK [debug] ********************************************************************************************************
+ ok: [srv_1] =>
+ msg: |-
+ all: ['srv_1', 'srv_2', 'srv_3']
+ ungrouped: []
+ vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3']
+ project_foo: ['srv_1', 'srv_2']
+ project_bar: ['srv_3']
+
+ PLAY RECAP **********************************************************************************************************
+ srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
+ srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
diff --git a/docs/docsite/rst/guide_modulehelper.rst b/docs/docsite/rst/guide_modulehelper.rst
index 1f8d305643..12657f4479 100644
--- a/docs/docsite/rst/guide_modulehelper.rst
+++ b/docs/docsite/rst/guide_modulehelper.rst
@@ -38,7 +38,6 @@ But bear in mind that it does not showcase all of MH's features:
),
supports_check_mode=True,
)
- use_old_vardict = False
def __run__(self):
self.vars.original_message = ''
@@ -76,13 +75,14 @@ section above, but there are more elements that will take part in it.
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
class MyTest(ModuleHelper):
+ # behavior for module parameters ONLY, see below for further information
output_params = ()
change_params = ()
diff_params = ()
- facts_name = None
facts_params = ()
- use_old_vardict = True
- mute_vardict_deprecation = False
+
+ facts_name = None # used if generating facts, from parameters or otherwise
+
module = dict(
argument_spec=dict(...),
# ...
@@ -202,27 +202,14 @@ By using ``self.vars``, you get a central mechanism to access the parameters but
As described in :ref:`ansible_collections.community.general.docsite.guide_vardict`, variables in ``VarDict`` have metadata associated to them.
One of the attributes in that metadata marks the variable for output, and MH makes use of that to generate the module's return values.
-.. important::
+.. note::
- The ``VarDict`` feature described was introduced in community.general 7.1.0, but there was a first
- implementation of it embedded within ``ModuleHelper``.
- That older implementation is now deprecated and will be removed in community.general 11.0.0.
- After community.general 7.1.0, MH modules generate a deprecation message about *using the old VarDict*.
- There are two ways to prevent that from happening:
+ The ``VarDict`` class was introduced in community.general 7.1.0, as part of ``ModuleHelper`` itself.
+ However, it has been factored out to become a utility on its own, described in :ref:`ansible_collections.community.general.docsite.guide_vardict`,
+ and the older implementation was removed in community.general 11.0.0.
- #. Set ``mute_vardict_deprecation = True`` and the deprecation will be silenced. If the module still uses the old ``VarDict``,
- it will not be able to update to community.general 11.0.0 (Spring 2026) upon its release.
- #. Set ``use_old_vardict = False`` to make the MH module use the new ``VarDict`` immediatelly.
- The new ``VarDict`` and its use is documented and this is the recommended way to handle this.
-
- .. code-block:: python
-
- class MyTest(ModuleHelper):
- use_old_vardict = False
- mute_vardict_deprecation = True
- ...
-
- These two settings are mutually exclusive, but that is not enforced and the behavior when setting both is not specified.
+ Some code might still refer to the class variables ``use_old_vardict`` and ``mute_vardict_deprecation``, used for the transition to the new
+ implementation, but from community.general 11.0.0 onwards they are no longer used and can be safely removed from the code.
Contrary to new variables created in ``VarDict``, module parameters are not set for output by default.
If you want to include some module parameters in the output, list them in the ``output_params`` class variable.
@@ -233,6 +220,11 @@ If you want to include some module parameters in the output, list them in the ``
output_params = ('state', 'name')
...
+.. important::
+
+ The variable names listed in ``output_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
Another neat feature provided by MH by using ``VarDict`` is the automatic tracking of changes when setting the metadata ``change=True``.
Again, to enable this feature for module parameters, you must list them in the ``change_params`` class variable.
@@ -243,6 +235,11 @@ Again, to enable this feature for module parameters, you must list them in the `
change_params = ('value', )
...
+.. important::
+
+ The variable names listed in ``change_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
.. seealso::
See more about this in
@@ -260,6 +257,11 @@ With that, MH will automatically generate the diff output for variables that hav
# example from community.general.gio_mime
self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True)
+.. important::
+
+ The variable names listed in ``diff_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
Moreover, if a module is set to return *facts* instead of return values, then again use the metadata ``fact=True`` and ``fact_params`` for module parameters.
Additionally, you must specify ``facts_name``, as in:
@@ -283,6 +285,11 @@ That generates an Ansible fact like:
debug:
msg: Volume fact is {{ ansible_facts.volume_facts.volume }}
+.. important::
+
+ The variable names listed in ``fact_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+ Names not found in ``argument_spec`` are silently ignored.
+
.. important::
If ``facts_name`` is not set, the module does not generate any facts.
@@ -384,7 +391,6 @@ By using ``StateModuleHelper`` you can make your code like the excerpt from the
module = dict(
...
)
- use_old_vardict = False
def __init_module__(self):
self.runner = gconftool2_runner(self.module, check_rc=True)
diff --git a/docs/docsite/rst/guide_packet.rst b/docs/docsite/rst/guide_packet.rst
index 9de5e3f614..95b38dddd0 100644
--- a/docs/docsite/rst/guide_packet.rst
+++ b/docs/docsite/rst/guide_packet.rst
@@ -67,16 +67,16 @@ The following code block is a simple playbook that creates one `Type 0
- hostnames: myserver
- operating_system: ubuntu_16_04
- plan: baremetal_0
- facility: sjc1
+ - community.general.packet_device:
+ project_id:
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
After running ``ansible-playbook playbook_create.yml``, you should have a server provisioned on Packet. You can verify through a CLI or in the `Packet portal `__.
@@ -110,10 +110,10 @@ If your playbook acts on existing Packet devices, you can only pass the ``hostna
hosts: localhost
tasks:
- - community.general.packet_device:
- project_id:
- hostnames: myserver
- state: rebooted
+ - community.general.packet_device:
+ project_id:
+ hostnames: myserver
+ state: rebooted
You can also identify specific Packet devices with the ``device_ids`` parameter. The device's UUID can be found in the `Packet Portal `_ or by using a `CLI `_. The following playbook removes a Packet device using the ``device_ids`` field:
@@ -125,10 +125,10 @@ You can also identify specific Packet devices with the ``device_ids`` parameter.
hosts: localhost
tasks:
- - community.general.packet_device:
- project_id:
- device_ids:
- state: absent
+ - community.general.packet_device:
+ project_id:
+ device_ids:
+ state: absent
More Complex Playbooks
@@ -153,43 +153,43 @@ The following playbook will create an SSH key, 3 Packet servers, and then wait u
hosts: localhost
tasks:
- - community.general.packet_sshkey:
- key_file: ./id_rsa.pub
- label: new
+ - community.general.packet_sshkey:
+ key_file: ./id_rsa.pub
+ label: new
- - community.general.packet_device:
- hostnames: [coreos-one, coreos-two, coreos-three]
- operating_system: coreos_beta
- plan: baremetal_0
- facility: ewr1
- project_id:
- wait_for_public_IPv: 4
- user_data: |
- #cloud-config
- coreos:
- etcd2:
- discovery: https://discovery.etcd.io/
- advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001
- initial-advertise-peer-urls: http://$private_ipv4:2380
- listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
- listen-peer-urls: http://$private_ipv4:2380
- fleet:
- public-ip: $private_ipv4
- units:
- - name: etcd2.service
- command: start
- - name: fleet.service
- command: start
- register: newhosts
+ - community.general.packet_device:
+ hostnames: [coreos-one, coreos-two, coreos-three]
+ operating_system: coreos_beta
+ plan: baremetal_0
+ facility: ewr1
+ project_id:
+ wait_for_public_IPv: 4
+ user_data: |
+ #cloud-config
+ coreos:
+ etcd2:
+ discovery: https://discovery.etcd.io/
+ advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001
+ initial-advertise-peer-urls: http://$private_ipv4:2380
+ listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
+ listen-peer-urls: http://$private_ipv4:2380
+ fleet:
+ public-ip: $private_ipv4
+ units:
+ - name: etcd2.service
+ command: start
+ - name: fleet.service
+ command: start
+ register: newhosts
- - name: wait for ssh
- ansible.builtin.wait_for:
- delay: 1
- host: "{{ item.public_ipv4 }}"
- port: 22
- state: started
- timeout: 500
- loop: "{{ newhosts.results[0].devices }}"
+ - name: wait for ssh
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ipv4 }}"
+ port: 22
+ state: started
+ timeout: 500
+ loop: "{{ newhosts.results[0].devices }}"
As with most Ansible modules, the default states of the Packet modules are idempotent, meaning the resources in your project will remain the same after re-runs of a playbook. Thus, we can keep the ``packet_sshkey`` module call in our playbook. If the public key is already in your Packet account, the call will have no effect.
diff --git a/docs/docsite/rst/guide_vardict.rst b/docs/docsite/rst/guide_vardict.rst
index f65b09055b..1beef0c57f 100644
--- a/docs/docsite/rst/guide_vardict.rst
+++ b/docs/docsite/rst/guide_vardict.rst
@@ -51,7 +51,7 @@ And by the time the module is about to exit:
That makes the return value of the module:
-.. code-block:: javascript
+.. code-block:: json
{
"abc": 123,
diff --git a/docs/docsite/rst/test_guide.rst b/docs/docsite/rst/test_guide.rst
index 7a261c7552..a1f5723df4 100644
--- a/docs/docsite/rst/test_guide.rst
+++ b/docs/docsite/rst/test_guide.rst
@@ -8,7 +8,7 @@
community.general Test (Plugin) Guide
=====================================
-The :ref:`community.general collection ` offers currently one test plugin.
+The :anscollection:`community.general collection ` offers currently one test plugin.
.. contents:: Topics
diff --git a/galaxy.yml b/galaxy.yml
index d73f85bee1..a39ffcc7e5 100644
--- a/galaxy.yml
+++ b/galaxy.yml
@@ -5,7 +5,7 @@
namespace: community
name: general
-version: 10.6.0
+version: 11.2.0
readme: README.md
authors:
- Ansible (https://github.com/ansible)
diff --git a/meta/runtime.yml b/meta/runtime.yml
index 8d003a6816..4efdc68688 100644
--- a/meta/runtime.yml
+++ b/meta/runtime.yml
@@ -3,7 +3,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-requires_ansible: '>=2.15.0'
+requires_ansible: '>=2.16.0'
action_groups:
consul:
- consul_agent_check
@@ -15,24 +15,9 @@ action_groups:
- consul_session
- consul_token
proxmox:
- - proxmox
- - proxmox_backup
- - proxmox_backup_info
- - proxmox_disk
- - proxmox_domain_info
- - proxmox_group_info
- - proxmox_kvm
- - proxmox_nic
- - proxmox_node_info
- - proxmox_pool
- - proxmox_pool_member
- - proxmox_snap
- - proxmox_storage_contents_info
- - proxmox_storage_info
- - proxmox_tasks_info
- - proxmox_template
- - proxmox_user_info
- - proxmox_vm_info
+ - metadata:
+ extend_group:
+ - community.proxmox.proxmox
keycloak:
- keycloak_authentication
- keycloak_authentication_required_actions
@@ -86,20 +71,26 @@ plugin_routing:
= yes' option.
yaml:
deprecation:
- removal_version: 13.0.0
- warning_text: The plugin has been superseded by the the option `result_format=yaml` in callback plugin ansible.builtin.default from ansible-core 2.13 onwards.
+ removal_version: 12.0.0
+ warning_text: >-
+ The plugin has been superseded by the option `result_format=yaml` in callback plugin ansible.builtin.default from ansible-core 2.13 onwards.
connection:
docker:
redirect: community.docker.docker
oc:
redirect: community.okd.oc
+ proxmox_pct_remote:
+ redirect: community.proxmox.proxmox_pct_remote
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
lookup:
gcp_storage_file:
redirect: community.google.gcp_storage_file
hashi_vault:
redirect: community.hashi_vault.hashi_vault
manifold:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: Company was acquired in 2021 and service was ceased afterwards.
nios:
@@ -125,42 +116,46 @@ plugin_routing:
deprecation:
removal_version: 13.0.0
warning_text: Project Atomic was sunset by the end of 2019.
+ catapult:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: DNS fails to resolve the API endpoint used by the module since Oct 2024. See https://github.com/ansible-collections/community.general/issues/10318 for details.
cisco_spark:
redirect: community.general.cisco_webex
clc_alert_policy:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_blueprint_package:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_firewall_policy:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_group:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_loadbalancer:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_modify_server:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_publicip:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_server:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: CenturyLink Cloud services went EOL in September 2023.
clc_server_snapshot:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: CenturyLink Cloud services went EOL in September 2023.
consul_acl:
@@ -319,7 +314,7 @@ plugin_routing:
hetzner_firewall_info:
redirect: community.hrobot.firewall_info
hipchat:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020.
hpilo_facts:
@@ -644,25 +639,115 @@ plugin_routing:
postgresql_user_obj_stat_info:
redirect: community.postgresql.postgresql_user_obj_stat_info
profitbricks:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: Supporting library is unsupported since 2021.
profitbricks_datacenter:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: Supporting library is unsupported since 2021.
profitbricks_nic:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: Supporting library is unsupported since 2021.
profitbricks_volume:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: Supporting library is unsupported since 2021.
profitbricks_volume_attachments:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: Supporting library is unsupported since 2021.
+ proxmox:
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_backup:
+ redirect: community.proxmox.proxmox_backup
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_backup_info:
+ redirect: community.proxmox.proxmox_backup_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_disk:
+ redirect: community.proxmox.proxmox_disk
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_domain_info:
+ redirect: community.proxmox.proxmox_domain_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_group_info:
+ redirect: community.proxmox.proxmox_group_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_kvm:
+ redirect: community.proxmox.proxmox_kvm
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_nic:
+ redirect: community.proxmox.proxmox_nic
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_node_info:
+ redirect: community.proxmox.proxmox_node_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_pool:
+ redirect: community.proxmox.proxmox_pool
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_pool_member:
+ redirect: community.proxmox.proxmox_pool_member
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_snap:
+ redirect: community.proxmox.proxmox_snap
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_storage_contents_info:
+ redirect: community.proxmox.proxmox_storage_contents_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_storage_info:
+ redirect: community.proxmox.proxmox_storage_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_tasks_info:
+ redirect: community.proxmox.proxmox_tasks_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_template:
+ redirect: community.proxmox.proxmox_template
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_user_info:
+ redirect: community.proxmox.proxmox_user_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
+ proxmox_vm_info:
+ redirect: community.proxmox.proxmox_vm_info
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
purefa_facts:
tombstone:
removal_version: 3.0.0
@@ -873,6 +958,10 @@ plugin_routing:
warning_text: This module relied on HTTPS APIs that do not exist anymore,
and any new development in the direction of providing an alternative should
happen in the context of the google.cloud collection.
+ typetalk:
+ deprecation:
+ removal_version: 13.0.0
+ warning_text: The typetalk service will be discontinued in December 2025.
vertica_facts:
tombstone:
removal_version: 3.0.0
@@ -921,6 +1010,11 @@ plugin_routing:
redirect: infoblox.nios_modules.nios
postgresql:
redirect: community.postgresql.postgresql
+ proxmox:
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
purestorage:
deprecation:
removal_version: 12.0.0
@@ -949,6 +1043,11 @@ plugin_routing:
redirect: infoblox.nios_modules.api
postgresql:
redirect: community.postgresql.postgresql
+ proxmox:
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
pure:
deprecation:
removal_version: 12.0.0
@@ -966,10 +1065,15 @@ plugin_routing:
redirect: community.docker.docker_machine
docker_swarm:
redirect: community.docker.docker_swarm
+ proxmox:
+ redirect: community.proxmox.proxmox
+ deprecation:
+ removal_version: 15.0.0
+ warning_text: The proxmox content has been moved to community.proxmox.
kubevirt:
redirect: community.kubevirt.kubevirt
stackpath_compute:
- deprecation:
+ tombstone:
removal_version: 11.0.0
warning_text: The company and the service were sunset in June 2024.
filter:
diff --git a/plugins/become/doas.py b/plugins/become/doas.py
index 13aef19874..ca12faea0d 100644
--- a/plugins/become/doas.py
+++ b/plugins/become/doas.py
@@ -72,7 +72,7 @@ options:
prompt_l10n:
description:
- List of localized strings to match for prompt detection.
- - If empty we will use the built in one.
+ - If empty the plugin uses the built-in one.
type: list
elements: string
default: []
@@ -83,6 +83,9 @@ options:
- name: ansible_doas_prompt_l10n
env:
- name: ANSIBLE_DOAS_PROMPT_L10N
+notes:
+ - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically
+ disables pipelining. On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user.
"""
import re
@@ -99,6 +102,10 @@ class BecomeModule(BecomeBase):
fail = ('Permission denied',)
missing = ('Authorization required',)
+ # See https://github.com/ansible-collections/community.general/issues/9977,
+ # https://github.com/ansible/ansible/pull/78111
+ pipelining = False
+
def check_password_prompt(self, b_output):
''' checks if the expected password prompt exists in b_output '''
diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py
index 1c936e46da..be56fd6128 100644
--- a/plugins/become/ksu.py
+++ b/plugins/become/ksu.py
@@ -73,7 +73,7 @@ options:
prompt_l10n:
description:
- List of localized strings to match for prompt detection.
- - If empty we will use the built in one.
+ - If empty the plugin uses the built-in one.
type: list
elements: string
default: []
diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py
index 81a9d06f86..ad3daa916d 100644
--- a/plugins/become/machinectl.py
+++ b/plugins/become/machinectl.py
@@ -71,10 +71,12 @@ options:
- section: machinectl_become_plugin
key: password
notes:
- - When not using this plugin with user V(root), it only works correctly with a polkit rule which will alter the behaviour
- of machinectl. This rule must alter the prompt behaviour to ask directly for the user credentials, if the user is allowed
- to perform the action (take a look at the examples section). If such a rule is not present the plugin only work if it
- is used in context with the root user, because then no further prompt will be shown by machinectl.
+ - When not using this plugin with user V(root), it only works correctly with a polkit rule which alters the behaviour
+ of C(machinectl). This rule must alter the prompt behaviour to ask directly for the user credentials, if the user is allowed
+ to perform the action (take a look at the examples section). If such a rule is not present the plugin only works if it
+ is used in context with the root user, because then no further prompt is shown by C(machinectl).
+ - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically
+ disables pipelining. On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user.
"""
EXAMPLES = r"""
@@ -107,6 +109,10 @@ class BecomeModule(BecomeBase):
success = ('==== AUTHENTICATION COMPLETE ====',)
require_tty = True # see https://github.com/ansible-collections/community.general/issues/6932
+ # See https://github.com/ansible/ansible/issues/81254,
+ # https://github.com/ansible/ansible/pull/78111
+ pipelining = False
+
@staticmethod
def remove_ansi_codes(line):
return ansi_color_codes.sub(b"", line)
diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py
index 65690f359b..9faf1ffc63 100644
--- a/plugins/become/pfexec.py
+++ b/plugins/become/pfexec.py
@@ -6,7 +6,7 @@ from __future__ import annotations
DOCUMENTATION = r"""
name: pfexec
-short_description: profile based execution
+short_description: Profile based execution
description:
- This become plugins allows your remote/login user to execute commands as another user using the C(pfexec) utility.
author: Ansible Core Team
diff --git a/plugins/become/run0.py b/plugins/become/run0.py
index 39e4667e7a..dce7c22448 100644
--- a/plugins/become/run0.py
+++ b/plugins/become/run0.py
@@ -61,7 +61,7 @@ options:
- name: ANSIBLE_RUN0_FLAGS
type: string
notes:
- - This plugin will only work when a C(polkit) rule is in place.
+ - This plugin only works when a C(polkit) rule is in place.
"""
EXAMPLES = r"""
diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py
index 2f4b2b7b02..1e9ffcb264 100644
--- a/plugins/cache/pickle.py
+++ b/plugins/cache/pickle.py
@@ -17,7 +17,7 @@ options:
_uri:
required: true
description:
- - Path in which the cache plugin will save the files.
+ - Path in which the cache plugin saves the files.
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
@@ -56,6 +56,7 @@ class CacheModule(BaseFileCacheModule):
"""
A caching module backed by pickle files.
"""
+ _persistent = False # prevent unnecessary JSON serialization and key munging
def _load(self, filepath):
# Pickle is a binary format
diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py
index 676423d3b6..8bf61f6898 100644
--- a/plugins/cache/yaml.py
+++ b/plugins/cache/yaml.py
@@ -17,7 +17,7 @@ options:
_uri:
required: true
description:
- - Path in which the cache plugin will save the files.
+ - Path in which the cache plugin saves the files.
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py
index 28be2882b6..e846aa2786 100644
--- a/plugins/callback/context_demo.py
+++ b/plugins/callback/context_demo.py
@@ -10,7 +10,7 @@ DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
name: context_demo
type: aggregate
-short_description: demo callback that adds play/task context
+short_description: Demo callback that adds play/task context
description:
- Displays some play and task context along with normal output.
- This is mostly for demo purposes.
diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py
index 15fc85a01b..2377d46585 100644
--- a/plugins/callback/counter_enabled.py
+++ b/plugins/callback/counter_enabled.py
@@ -12,11 +12,11 @@ DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
name: counter_enabled
type: stdout
-short_description: adds counters to the output items (tasks and hosts/task)
+short_description: Adds counters to the output items (tasks and hosts/task)
description:
- Use this callback when you need a kind of progress bar on a large environments.
- - You will know how many tasks has the playbook to run, and which one is actually running.
- - You will know how many hosts may run a task, and which of them is actually running.
+ - You can see how many tasks the playbook has to run, and which one is actually running.
+ - You can see how many hosts may run a task, and which of them is actually running.
extends_documentation_fragment:
- default_callback
requirements:
diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py
index 67cad4fd8f..1fd68b5d60 100644
--- a/plugins/callback/dense.py
+++ b/plugins/callback/dense.py
@@ -9,11 +9,11 @@ from __future__ import annotations
DOCUMENTATION = r"""
name: dense
type: stdout
-short_description: minimal stdout output
+short_description: Minimal stdout output
extends_documentation_fragment:
- default_callback
description:
- - When in verbose mode it will act the same as the default callback.
+ - When in verbose mode it acts the same as the default callback.
author:
- Dag Wieers (@dagwieers)
requirements:
@@ -263,12 +263,8 @@ class CallbackModule(CallbackModule_default):
sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset)
sys.stdout.flush()
-# if result._result.get('diff', False):
-# sys.stdout.write('\n' + vt100.linewrap)
sys.stdout.write(vt100.linewrap)
-# self.keep = True
-
def _display_task_banner(self):
if not self.shown_title:
self.shown_title = True
@@ -312,12 +308,12 @@ class CallbackModule(CallbackModule_default):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
- sys.stdout.write(f"{vt100.reset + result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}")
+ sys.stdout.write(f"{vt100.reset}{result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}")
else:
sys.stdout.write(result._host.get_name())
sys.stdout.write(f": {dump}\n")
- sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
+ sys.stdout.write(f"{vt100.reset}{vt100.save}{vt100.clearline}")
sys.stdout.flush()
if status == 'changed':
diff --git a/plugins/callback/diy.py b/plugins/callback/diy.py
index a4369daadd..f84789d010 100644
--- a/plugins/callback/diy.py
+++ b/plugins/callback/diy.py
@@ -23,15 +23,15 @@ notes:
that is available using the other various execution contexts, such as playbook, play, task, and so on so forth.
- Options being set by their respective variable input can only be set using the variable if the variable was set in a context
that is available to the respective callback. Use the C(ansible_callback_diy) dictionary to see what is available to a
- callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output the top level variable names available
+ callback. Additionally, C(ansible_callback_diy.top_level_var_names) outputs the top level variable names available
to the callback.
- Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For
- example, C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}").
- - 'B(Condition) for all C(msg) options: if value C(is None or omit), then the option is not being used. B(Effect): use
- of the C(default) callback plugin for output.'
- - 'B(Condition) for all C(msg) options: if value C(is not None and not omit and length is not greater than 0), then the
+ example, V("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}").
+ - 'B(Condition) for all C(msg) options: if value V(is None or omit), then the option is not being used. B(Effect): use of
+ the C(default) callback plugin for output.'
+ - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is not greater than 0), then the
option is being used without output. B(Effect): suppress output.'
- - 'B(Condition) for all C(msg) options: if value C(is not None and not omit and length is greater than 0), then the option
+ - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is greater than 0), then the option
is being used with output. B(Effect): render value as template and output.'
- 'Valid color values: V(black), V(bright gray), V(blue), V(white), V(green), V(bright blue), V(cyan), V(bright green),
V(red), V(bright cyan), V(purple), V(bright red), V(yellow), V(bright purple), V(dark gray), V(bright yellow), V(magenta),
diff --git a/plugins/callback/elastic.py b/plugins/callback/elastic.py
index cfa66e53b9..a4b0974f0b 100644
--- a/plugins/callback/elastic.py
+++ b/plugins/callback/elastic.py
@@ -87,6 +87,7 @@ from contextlib import closing
from os.path import basename
from ansible.errors import AnsibleError, AnsibleRuntimeError
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.six import raise_from
from ansible.plugins.callback import CallbackBase
@@ -140,7 +141,6 @@ class HostData:
class ElasticSource(object):
def __init__(self, display):
self.ansible_playbook = ""
- self.ansible_version = None
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
try:
@@ -183,9 +183,6 @@ class ElasticSource(object):
task = tasks_data[task_uuid]
- if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = result._task_fields['args'].get('_ansible_version')
-
task.add_host(HostData(host_uuid, host_name, status, result))
def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name,
@@ -209,8 +206,7 @@ class ElasticSource(object):
else:
apm_cli.begin_transaction("Session", start=parent_start_time)
# Populate trace metadata attributes
- if self.ansible_version is not None:
- label(ansible_version=self.ansible_version)
+ label(ansible_version=ansible_version)
label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
if self.ip_address is not None:
label(ansible_host_ip=self.ip_address)
diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py
index 10aa866142..c5a0881e14 100644
--- a/plugins/callback/jabber.py
+++ b/plugins/callback/jabber.py
@@ -10,7 +10,7 @@ DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
name: jabber
type: notification
-short_description: post task events to a Jabber server
+short_description: Post task events to a Jabber server
description:
- The chatty part of ChatOps with a Hipchat server as a target.
- This callback plugin sends status updates to a HipChat channel during playbook execution.
@@ -36,7 +36,7 @@ options:
env:
- name: JABBER_PASS
to:
- description: Chat identifier that will receive the message.
+ description: Chat identifier that receives the message.
type: str
required: true
env:
diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py
index 483976acae..3de6c0bec0 100644
--- a/plugins/callback/log_plays.py
+++ b/plugins/callback/log_plays.py
@@ -10,7 +10,7 @@ DOCUMENTATION = r"""
author: Unknown (!UNKNOWN)
name: log_plays
type: notification
-short_description: write playbook output to log file
+short_description: Write playbook output to log file
description:
- This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory.
requirements:
@@ -19,7 +19,7 @@ requirements:
options:
log_folder:
default: /var/log/ansible/hosts
- description: The folder where log files will be created.
+ description: The folder where log files are created.
type: str
env:
- name: ANSIBLE_LOG_FOLDER
diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py
index 224ce7efd8..bd6b89fde1 100644
--- a/plugins/callback/loganalytics.py
+++ b/plugins/callback/loganalytics.py
@@ -11,7 +11,7 @@ type: notification
short_description: Posts task results to Azure Log Analytics
author: "Cyrus Li (@zhcli) "
description:
- - This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace.
+ - This callback plugin posts task results in JSON format to an Azure Log Analytics workspace.
- Credits to authors of splunk callback plugin.
version_added: "2.4.0"
requirements:
@@ -62,6 +62,7 @@ import getpass
from os.path import basename
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
@@ -75,7 +76,6 @@ class AzureLogAnalyticsSource(object):
def __init__(self):
self.ansible_check_mode = False
self.ansible_playbook = ""
- self.ansible_version = ""
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
self.user = getpass.getuser()
@@ -102,10 +102,6 @@ class AzureLogAnalyticsSource(object):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
- if result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = \
- result._task_fields['args'].get('_ansible_version')
-
if result._task._role:
ansible_role = str(result._task._role)
else:
@@ -119,7 +115,7 @@ class AzureLogAnalyticsSource(object):
data['host'] = self.host
data['user'] = self.user
data['runtime'] = runtime
- data['ansible_version'] = self.ansible_version
+ data['ansible_version'] = ansible_version
data['ansible_check_mode'] = self.ansible_check_mode
data['ansible_host'] = result._host.name
data['ansible_playbook'] = self.ansible_playbook
diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py
index 90fe6d4465..9ceb6547b2 100644
--- a/plugins/callback/logdna.py
+++ b/plugins/callback/logdna.py
@@ -11,7 +11,7 @@ name: logdna
type: notification
short_description: Sends playbook logs to LogDNA
description:
- - This callback will report logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)).
+ - This callback reports logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)).
requirements:
- LogDNA Python Library (U(https://github.com/logdna/python))
- whitelisting in configuration
diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py
index bc5d7e03ce..796398d6b6 100644
--- a/plugins/callback/logentries.py
+++ b/plugins/callback/logentries.py
@@ -11,7 +11,7 @@ name: logentries
type: notification
short_description: Sends events to Logentries
description:
- - This callback plugin will generate JSON objects and send them to Logentries using TCP for auditing/debugging purposes.
+ - This callback plugin generates JSON objects and sends them to Logentries using TCP for auditing/debugging purposes.
requirements:
- whitelisting in configuration
- certifi (Python library)
diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py
index 9d299e50ed..8b5acc6b9f 100644
--- a/plugins/callback/logstash.py
+++ b/plugins/callback/logstash.py
@@ -12,7 +12,7 @@ name: logstash
type: notification
short_description: Sends events to Logstash
description:
- - This callback will report facts and task events to Logstash U(https://www.elastic.co/products/logstash).
+ - This callback reports facts and task events to Logstash U(https://www.elastic.co/products/logstash).
requirements:
- whitelisting in configuration
- logstash (Python library)
@@ -127,9 +127,7 @@ class CallbackModule(CallbackBase):
if not HAS_LOGSTASH:
self.disabled = True
- self._display.warning("The required python-logstash/python3-logstash is not installed. "
- "pip install python-logstash for Python 2"
- "pip install python3-logstash for Python 3")
+ self._display.warning("The required python3-logstash is not installed.")
self.start_time = now()
@@ -182,7 +180,7 @@ class CallbackModule(CallbackBase):
data['status'] = "OK"
data['ansible_playbook'] = playbook._file_name
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"START PLAYBOOK | %s", data['ansible_playbook'], extra=data
)
@@ -207,7 +205,7 @@ class CallbackModule(CallbackBase):
data['ansible_playbook_duration'] = runtime.total_seconds()
data['ansible_result'] = json.dumps(summarize_stat) # deprecated field
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data
)
@@ -226,7 +224,7 @@ class CallbackModule(CallbackBase):
data['ansible_play_id'] = self.play_id
data['ansible_play_name'] = self.play_name
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("START PLAY | %s", self.play_name, extra=data)
else:
self.logger.info("ansible play", extra=data)
@@ -251,7 +249,7 @@ class CallbackModule(CallbackBase):
data['ansible_task'] = task_name
data['ansible_facts'] = self._dump_results(result._result)
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"SETUP FACTS | %s", self._dump_results(result._result), extra=data
)
@@ -272,7 +270,7 @@ class CallbackModule(CallbackBase):
data['ansible_task_id'] = self.task_id
data['ansible_result'] = self._dump_results(result._result)
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info(
"TASK OK | %s | RESULT | %s",
task_name, self._dump_results(result._result), extra=data
@@ -293,7 +291,7 @@ class CallbackModule(CallbackBase):
data['ansible_task_id'] = self.task_id
data['ansible_result'] = self._dump_results(result._result)
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("TASK SKIPPED | %s", task_name, extra=data)
else:
self.logger.info("ansible skipped", extra=data)
@@ -307,7 +305,7 @@ class CallbackModule(CallbackBase):
data['ansible_play_name'] = self.play_name
data['imported_file'] = imported_file
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("IMPORT | %s", imported_file, extra=data)
else:
self.logger.info("ansible import", extra=data)
@@ -321,7 +319,7 @@ class CallbackModule(CallbackBase):
data['ansible_play_name'] = self.play_name
data['imported_file'] = missing_file
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.info("NOT IMPORTED | %s", missing_file, extra=data)
else:
self.logger.info("ansible import", extra=data)
@@ -345,7 +343,7 @@ class CallbackModule(CallbackBase):
data['ansible_result'] = self._dump_results(result._result)
self.errors += 1
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.error(
"TASK FAILED | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
@@ -368,7 +366,7 @@ class CallbackModule(CallbackBase):
data['ansible_result'] = self._dump_results(result._result)
self.errors += 1
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.error(
"UNREACHABLE | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
@@ -391,7 +389,7 @@ class CallbackModule(CallbackBase):
data['ansible_result'] = self._dump_results(result._result)
self.errors += 1
- if (self.ls_format_version == "v2"):
+ if self.ls_format_version == "v2":
self.logger.error(
"ASYNC FAILED | %s | HOST | %s | RESULT | %s",
task_name, self.hostname,
diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py
index 80bef26044..d05982cd61 100644
--- a/plugins/callback/mail.py
+++ b/plugins/callback/mail.py
@@ -11,7 +11,7 @@ name: mail
type: notification
short_description: Sends failure events through email
description:
- - This callback will report failures through email.
+ - This callback reports failures through email.
author:
- Dag Wieers (@dagwieers)
requirements:
@@ -212,7 +212,8 @@ class CallbackModule(CallbackBase):
if self.itembody:
body += self.itembody
elif result._result.get('failed_when_result') is True:
- fail_cond = self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when))
+ fail_cond_list = '\n- '.join(result._task.failed_when)
+ fail_cond = self.indent(f"failed_when:\n- {fail_cond_list}")
body += f"due to the following condition:\n\n{fail_cond}\n\n"
elif result._result.get('msg'):
body += self.body_blob(result._result['msg'], 'message')
diff --git a/plugins/callback/null.py b/plugins/callback/null.py
index b59389e39a..0527c1c467 100644
--- a/plugins/callback/null.py
+++ b/plugins/callback/null.py
@@ -12,7 +12,7 @@ name: 'null'
type: stdout
requirements:
- set as main display callback
-short_description: do not display stuff to screen
+short_description: Do not display stuff to screen
description:
- This callback prevents outputting events to screen.
"""
diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py
index 039408f301..80f24924b9 100644
--- a/plugins/callback/opentelemetry.py
+++ b/plugins/callback/opentelemetry.py
@@ -35,8 +35,8 @@ options:
- Whether to enable this callback only if the given environment variable exists and it is set to V(true).
- This is handy when you use Configuration as Code and want to send distributed traces if running in the CI rather when
running Ansible locally.
- - For such, it evaluates the given O(enable_from_environment) value as environment variable and if set to true this
- plugin will be enabled.
+ - For such, it evaluates the given O(enable_from_environment) value as environment variable and if set to V(true) this
+ plugin is enabled.
env:
- name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
ini:
@@ -143,6 +143,7 @@ from collections import OrderedDict
from os.path import basename
from ansible.errors import AnsibleError
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.six import raise_from
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.plugins.callback import CallbackBase
@@ -212,7 +213,6 @@ class HostData:
class OpenTelemetrySource(object):
def __init__(self, display):
self.ansible_playbook = ""
- self.ansible_version = None
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
try:
@@ -260,9 +260,6 @@ class OpenTelemetrySource(object):
task = tasks_data[task_uuid]
- if self.ansible_version is None and hasattr(result, '_task_fields') and result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = result._task_fields['args'].get('_ansible_version')
-
task.dump = dump
task.add_host(HostData(host_uuid, host_name, status, result))
@@ -310,8 +307,7 @@ class OpenTelemetrySource(object):
start_time=parent_start_time, kind=SpanKind.SERVER) as parent:
parent.set_status(status)
# Populate trace metadata attributes
- if self.ansible_version is not None:
- parent.set_attribute("ansible.version", self.ansible_version)
+ parent.set_attribute("ansible.version", ansible_version)
parent.set_attribute("ansible.session", self.session)
parent.set_attribute("ansible.host.name", self.host)
if self.ip_address is not None:
diff --git a/plugins/callback/print_task.py b/plugins/callback/print_task.py
new file mode 100644
index 0000000000..809baddb95
--- /dev/null
+++ b/plugins/callback/print_task.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Max Mitschke
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+name: print_task
+type: aggregate
+short_description: Prints playbook task snippet to job output
+description:
+ - This plugin prints the currently executing playbook task to the job output.
+version_added: 10.7.0
+requirements:
+ - enable in configuration
+"""
+
+EXAMPLES = r"""
+ansible.cfg: |-
+ # Enable plugin
+ [defaults]
+ callbacks_enabled=community.general.print_task
+"""
+
+from yaml import load, dump
+
+try:
+ from yaml import CSafeDumper as SafeDumper
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeDumper, SafeLoader
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ This callback module prints the currently executing playbook task to the job output.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.print_task'
+
+ CALLBACK_NEEDS_ENABLED = True
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+ self._printed_message = False
+
+ def _print_task(self, task):
+ if hasattr(task, '_ds'):
+ task_snippet = load(str([task._ds.copy()]), Loader=SafeLoader)
+ task_yaml = dump(task_snippet, sort_keys=False, Dumper=SafeDumper)
+ self._display.display(f"\n{task_yaml}\n")
+ self._printed_message = True
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._printed_message = False
+
+ def v2_runner_on_start(self, host, task):
+ if not self._printed_message:
+ self._print_task(task)
diff --git a/plugins/callback/say.py b/plugins/callback/say.py
index e6da490ec7..8a4e93f353 100644
--- a/plugins/callback/say.py
+++ b/plugins/callback/say.py
@@ -14,9 +14,9 @@ type: notification
requirements:
- whitelisting in configuration
- the C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program
-short_description: notify using software speech synthesizer
+short_description: Notify using software speech synthesizer
description:
- - This plugin will use the C(say) or C(espeak) program to "speak" about play events.
+ - This plugin uses C(say) or C(espeak) to "speak" about play events.
"""
import platform
diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py
index 9cc805d3cd..53d40671bc 100644
--- a/plugins/callback/selective.py
+++ b/plugins/callback/selective.py
@@ -12,7 +12,7 @@ name: selective
type: stdout
requirements:
- set as main display callback
-short_description: only print certain tasks
+short_description: Only print certain tasks
description:
- This callback only prints tasks that have been tagged with C(print_action) or that have failed. This allows operators
to focus on the tasks that provide value only.
diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py
index 1d4534892a..c385050d67 100644
--- a/plugins/callback/splunk.py
+++ b/plugins/callback/splunk.py
@@ -11,7 +11,7 @@ type: notification
short_description: Sends task result events to Splunk HTTP Event Collector
author: "Stuart Hirst (!UNKNOWN) "
description:
- - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
+ - This callback plugin sends task results as JSON formatted events to a Splunk HTTP collector.
- The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/).
- Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based.
requirements:
@@ -91,6 +91,7 @@ import getpass
from os.path import basename
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
@@ -104,7 +105,6 @@ class SplunkHTTPCollectorSource(object):
def __init__(self):
self.ansible_check_mode = False
self.ansible_playbook = ""
- self.ansible_version = ""
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
self.ip_address = socket.gethostbyname(socket.gethostname())
@@ -114,10 +114,6 @@ class SplunkHTTPCollectorSource(object):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
- if result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = \
- result._task_fields['args'].get('_ansible_version')
-
if result._task._role:
ansible_role = str(result._task._role)
else:
@@ -143,7 +139,7 @@ class SplunkHTTPCollectorSource(object):
data['ip_address'] = self.ip_address
data['user'] = self.user
data['runtime'] = runtime
- data['ansible_version'] = self.ansible_version
+ data['ansible_version'] = ansible_version
data['ansible_check_mode'] = self.ansible_check_mode
data['ansible_host'] = result._host.name
data['ansible_playbook'] = self.ansible_playbook
diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py
index 5c310d1c50..7a762c30e8 100644
--- a/plugins/callback/sumologic.py
+++ b/plugins/callback/sumologic.py
@@ -11,7 +11,7 @@ type: notification
short_description: Sends task result events to Sumologic
author: "Ryan Currah (@ryancurrah)"
description:
- - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source.
+ - This callback plugin sends task results as JSON formatted events to a Sumologic HTTP collector source.
requirements:
- Whitelisting this callback plugin
- 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of V(yyyy-MM-dd HH:mm:ss ZZZZ) and
@@ -48,6 +48,7 @@ import getpass
from os.path import basename
+from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
@@ -61,7 +62,6 @@ class SumologicHTTPCollectorSource(object):
def __init__(self):
self.ansible_check_mode = False
self.ansible_playbook = ""
- self.ansible_version = ""
self.session = str(uuid.uuid4())
self.host = socket.gethostname()
self.ip_address = socket.gethostbyname(socket.gethostname())
@@ -71,10 +71,6 @@ class SumologicHTTPCollectorSource(object):
if result._task_fields['args'].get('_ansible_check_mode') is True:
self.ansible_check_mode = True
- if result._task_fields['args'].get('_ansible_version'):
- self.ansible_version = \
- result._task_fields['args'].get('_ansible_version')
-
if result._task._role:
ansible_role = str(result._task._role)
else:
@@ -92,7 +88,7 @@ class SumologicHTTPCollectorSource(object):
data['ip_address'] = self.ip_address
data['user'] = self.user
data['runtime'] = runtime
- data['ansible_version'] = self.ansible_version
+ data['ansible_version'] = ansible_version
data['ansible_check_mode'] = self.ansible_check_mode
data['ansible_host'] = result._host.name
data['ansible_playbook'] = self.ansible_playbook
diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py
index 9e5c78c90c..cab3973be1 100644
--- a/plugins/callback/syslog_json.py
+++ b/plugins/callback/syslog_json.py
@@ -12,12 +12,12 @@ name: syslog_json
type: notification
requirements:
- whitelist in configuration
-short_description: sends JSON events to syslog
+short_description: Sends JSON events to syslog
description:
- This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format.
options:
server:
- description: Syslog server that will receive the event.
+ description: Syslog server that receives the event.
type: str
env:
- name: SYSLOG_SERVER
diff --git a/plugins/callback/tasks_only.py b/plugins/callback/tasks_only.py
new file mode 100644
index 0000000000..f64c4c57db
--- /dev/null
+++ b/plugins/callback/tasks_only.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2025, Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+author: Felix Fontein (@felixfontein)
+name: tasks_only
+type: stdout
+version_added: 11.1.0
+short_description: Only show tasks
+description:
+ - Removes play start and stats marker from P(ansible.builtin.default#callback)'s output.
+ - Can be used to generate output for documentation examples.
+ For this, the O(number_of_columns) option should be set to an explicit value.
+extends_documentation_fragment:
+ - default_callback
+options:
+ number_of_columns:
+ description:
+ - Sets the number of columns for Ansible's display.
+ type: int
+ env:
+ - name: ANSIBLE_COLLECTIONS_TASKS_ONLY_NUMBER_OF_COLUMNS
+"""
+
+EXAMPLES = r"""
+---
+# Enable callback in ansible.cfg:
+ansible_config: |-
+ [defaults]
+ stdout_callback = community.general.tasks_only
+
+---
+# Enable callback with environment variables:
+environment_variable: |-
+ ANSIBLE_STDOUT_CALLBACK=community.general.tasks_only
+"""
+
+from ansible.plugins.callback.default import CallbackModule as Default
+
+
+class CallbackModule(Default):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.tasks_only'
+
+ def v2_playbook_on_play_start(self, play):
+ pass
+
+ def v2_playbook_on_stats(self, stats):
+ pass
+
+ def set_options(self, *args, **kwargs):
+ result = super(CallbackModule, self).set_options(*args, **kwargs)
+ self.number_of_columns = self.get_option("number_of_columns")
+ if self.number_of_columns is not None:
+ self._display.columns = self.number_of_columns
+ return result
diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py
index 48f9b2d1f0..8fd8c10c94 100644
--- a/plugins/callback/unixy.py
+++ b/plugins/callback/unixy.py
@@ -11,7 +11,7 @@ DOCUMENTATION = r"""
name: unixy
type: stdout
author: Al Bowles (@akatch)
-short_description: condensed Ansible output
+short_description: Condensed Ansible output
description:
- Consolidated Ansible output in the style of LINUX/UNIX startup logs.
extends_documentation_fragment:
diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py
index 3393e363d5..f02840c9c6 100644
--- a/plugins/callback/yaml.py
+++ b/plugins/callback/yaml.py
@@ -12,7 +12,7 @@ name: yaml
type: stdout
short_description: YAML-ized Ansible screen output
deprecated:
- removed_in: 13.0.0
+ removed_in: 12.0.0
why: Starting in ansible-core 2.13, the P(ansible.builtin.default#callback) callback has support for printing output in
YAML format.
alternative: Use O(ansible.builtin.default#callback:result_format=yaml).
@@ -37,9 +37,9 @@ import yaml
import json
import re
import string
+from collections.abc import Mapping, Sequence
from ansible.module_utils.common.text.converters import to_text
-from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.callback import strip_internal_keys, module_response_deepcopy
from ansible.plugins.callback.default import CallbackModule as Default
@@ -53,77 +53,80 @@ def should_use_block(value):
return False
+def adjust_str_value_for_block(value):
+ # we care more about readable than accuracy, so...
+ # ...no trailing space
+ value = value.rstrip()
+ # ...and non-printable characters
+ value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
+ # ...tabs prevent blocks from expanding
+ value = value.expandtabs()
+ # ...and odd bits of whitespace
+ value = re.sub(r'[\x0b\x0c\r]', '', value)
+ # ...as does trailing space
+ value = re.sub(r' +\n', '\n', value)
+ return value
+
+
+def create_string_node(tag, value, style, default_style):
+ if style is None:
+ if should_use_block(value):
+ style = '|'
+ value = adjust_str_value_for_block(value)
+ else:
+ style = default_style
+ return yaml.representer.ScalarNode(tag, value, style=style)
+
+
try:
+ from ansible.module_utils.common.yaml import HAS_LIBYAML
+ # import below was added in https://github.com/ansible/ansible/pull/85039,
+ # first contained in ansible-core 2.19.0b2:
+ from ansible.utils.vars import transform_to_native_types
+
+ if HAS_LIBYAML:
+ from yaml.cyaml import CSafeDumper as SafeDumper
+ else:
+ from yaml import SafeDumper
+
+ class MyDumper(SafeDumper):
+ def represent_scalar(self, tag, value, style=None):
+ """Uses block style for multi-line strings"""
+ node = create_string_node(tag, value, style, self.default_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+except ImportError:
+ # In case transform_to_native_types cannot be imported, we either have ansible-core 2.19.0b1
+ # (or some random commit from the devel or stable-2.19 branch after merging the DT changes
+ # and before transform_to_native_types was added), or we have a version without the DT changes.
+
+ # Here we simply assume we have a version without the DT changes, and thus can continue as
+ # with ansible-core 2.18 and before.
+
+ transform_to_native_types = None
+
+ from ansible.parsing.yaml.dumper import AnsibleDumper
+
class MyDumper(AnsibleDumper): # pylint: disable=inherit-non-class
def represent_scalar(self, tag, value, style=None):
"""Uses block style for multi-line strings"""
- if style is None:
- if should_use_block(value):
- style = '|'
- # we care more about readable than accuracy, so...
- # ...no trailing space
- value = value.rstrip()
- # ...and non-printable characters
- value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
- # ...tabs prevent blocks from expanding
- value = value.expandtabs()
- # ...and odd bits of whitespace
- value = re.sub(r'[\x0b\x0c\r]', '', value)
- # ...as does trailing space
- value = re.sub(r' +\n', '\n', value)
- else:
- style = self.default_style
- node = yaml.representer.ScalarNode(tag, value, style=style)
+ node = create_string_node(tag, value, style, self.default_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
-except: # noqa: E722, pylint: disable=bare-except
- # This happens with Data Tagging, see https://github.com/ansible/ansible/issues/84781
- # Until there is a better solution we'll resort to using ansible-core internals.
- from ansible._internal._yaml import _dumper
- import typing as t
- class MyDumper(_dumper._BaseDumper):
- # This code is mostly taken from ansible._internal._yaml._dumper
- @classmethod
- def _register_representers(cls) -> None:
- cls.add_multi_representer(_dumper.AnsibleTaggedObject, cls.represent_ansible_tagged_object)
- cls.add_multi_representer(_dumper.Tripwire, cls.represent_tripwire)
- cls.add_multi_representer(_dumper.c.Mapping, _dumper.SafeRepresenter.represent_dict)
- cls.add_multi_representer(_dumper.c.Sequence, _dumper.SafeRepresenter.represent_list)
- def represent_ansible_tagged_object(self, data):
- if ciphertext := _dumper.VaultHelper.get_ciphertext(data, with_tags=False):
- return self.represent_scalar('!vault', ciphertext, style='|')
-
- return self.represent_data(_dumper.AnsibleTagHelper.as_native_type(data)) # automatically decrypts encrypted strings
-
- def represent_tripwire(self, data: _dumper.Tripwire) -> t.NoReturn:
- data.trip()
-
- # The following function is the same as in the try/except
- def represent_scalar(self, tag, value, style=None):
- """Uses block style for multi-line strings"""
- if style is None:
- if should_use_block(value):
- style = '|'
- # we care more about readable than accuracy, so...
- # ...no trailing space
- value = value.rstrip()
- # ...and non-printable characters
- value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
- # ...tabs prevent blocks from expanding
- value = value.expandtabs()
- # ...and odd bits of whitespace
- value = re.sub(r'[\x0b\x0c\r]', '', value)
- # ...as does trailing space
- value = re.sub(r' +\n', '\n', value)
- else:
- style = self.default_style
- node = yaml.representer.ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
+def transform_recursively(value, transform):
+ # Since 2.19.0b7, this should no longer be needed:
+ # https://github.com/ansible/ansible/issues/85325
+ # https://github.com/ansible/ansible/pull/85389
+ if isinstance(value, Mapping):
+ return {transform(k): transform(v) for k, v in value.items()}
+ if isinstance(value, Sequence) and not isinstance(value, (str, bytes)):
+ return [transform(e) for e in value]
+ return transform(value)
class CallbackModule(Default):
@@ -180,6 +183,8 @@ class CallbackModule(Default):
if abridged_result:
dumped += '\n'
+ if transform_to_native_types is not None:
+ abridged_result = transform_recursively(abridged_result, lambda v: transform_to_native_types(v, redact=False))
dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False))
# indent by a couple of spaces
diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py
index 842ad8f924..4f73d05532 100644
--- a/plugins/connection/incus.py
+++ b/plugins/connection/incus.py
@@ -52,7 +52,7 @@ options:
remote_user:
description:
- User to login/authenticate as.
- - Can be set from the CLI via the C(--user) or C(-u) options.
+ - Can be set from the CLI with the C(--user) or C(-u) options.
type: string
default: root
vars:
@@ -155,11 +155,35 @@ class Connection(ConnectionBase):
stdout = to_text(stdout)
stderr = to_text(stderr)
- if stderr == "Error: Instance is not running.\n":
- raise AnsibleConnectionFailure(f"instance not running: {self._instance()}")
+ if stderr.startswith("Error: ") and stderr.rstrip().endswith(
+ ": Instance is not running"
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance not running: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
- if stderr == "Error: Instance not found\n":
- raise AnsibleConnectionFailure(f"instance not found: {self._instance()}")
+ if stderr.startswith("Error: ") and stderr.rstrip().endswith(
+ ": Instance not found"
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance not found: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
+
+ if (
+ stderr.startswith("Error: ")
+ and ": User does not have permission " in stderr
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
+
+ if (
+ stderr.startswith("Error: ")
+ and ": User does not have entitlement " in stderr
+ ):
+ raise AnsibleConnectionFailure(
+ f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})"
+ )
return process.returncode, stdout, stderr
diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py
index 2670ed1b5f..2cc774a1d4 100644
--- a/plugins/connection/lxd.py
+++ b/plugins/connection/lxd.py
@@ -52,7 +52,7 @@ options:
remote_user:
description:
- User to login/authenticate as.
- - Can be set from the CLI via the C(--user) or C(-u) options.
+ - Can be set from the CLI with the C(--user) or C(-u) options.
type: string
default: root
vars:
diff --git a/plugins/connection/proxmox_pct_remote.py b/plugins/connection/proxmox_pct_remote.py
deleted file mode 100644
index c46090083e..0000000000
--- a/plugins/connection/proxmox_pct_remote.py
+++ /dev/null
@@ -1,857 +0,0 @@
-# -*- coding: utf-8 -*-
-# Derived from ansible/plugins/connection/paramiko_ssh.py (c) 2012, Michael DeHaan
-# Copyright (c) 2024 Nils Stein (@mietzen)
-# Copyright (c) 2024 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import annotations
-
-DOCUMENTATION = r"""
-author: Nils Stein (@mietzen)
-name: proxmox_pct_remote
-short_description: Run tasks in Proxmox LXC container instances using pct CLI via SSH
-requirements:
- - paramiko
-description:
- - Run commands or put/fetch files to an existing Proxmox LXC container using pct CLI via SSH.
- - Uses the Python SSH implementation (Paramiko) to connect to the Proxmox host.
-version_added: "10.3.0"
-options:
- remote_addr:
- description:
- - Address of the remote target.
- default: inventory_hostname
- type: string
- vars:
- - name: inventory_hostname
- - name: ansible_host
- - name: ansible_ssh_host
- - name: ansible_paramiko_host
- port:
- description: Remote port to connect to.
- type: int
- default: 22
- ini:
- - section: defaults
- key: remote_port
- - section: paramiko_connection
- key: remote_port
- env:
- - name: ANSIBLE_REMOTE_PORT
- - name: ANSIBLE_REMOTE_PARAMIKO_PORT
- vars:
- - name: ansible_port
- - name: ansible_ssh_port
- - name: ansible_paramiko_port
- keyword:
- - name: port
- remote_user:
- description:
- - User to login/authenticate as.
- - Can be set from the CLI via the C(--user) or C(-u) options.
- type: string
- vars:
- - name: ansible_user
- - name: ansible_ssh_user
- - name: ansible_paramiko_user
- env:
- - name: ANSIBLE_REMOTE_USER
- - name: ANSIBLE_PARAMIKO_REMOTE_USER
- ini:
- - section: defaults
- key: remote_user
- - section: paramiko_connection
- key: remote_user
- keyword:
- - name: remote_user
- password:
- description:
- - Secret used to either login the SSH server or as a passphrase for SSH keys that require it.
- - Can be set from the CLI via the C(--ask-pass) option.
- type: string
- vars:
- - name: ansible_password
- - name: ansible_ssh_pass
- - name: ansible_ssh_password
- - name: ansible_paramiko_pass
- - name: ansible_paramiko_password
- use_rsa_sha2_algorithms:
- description:
- - Whether or not to enable RSA SHA2 algorithms for pubkeys and hostkeys.
- - On paramiko versions older than 2.9, this only affects hostkeys.
- - For behavior matching paramiko<2.9 set this to V(false).
- vars:
- - name: ansible_paramiko_use_rsa_sha2_algorithms
- ini:
- - {key: use_rsa_sha2_algorithms, section: paramiko_connection}
- env:
- - {name: ANSIBLE_PARAMIKO_USE_RSA_SHA2_ALGORITHMS}
- default: true
- type: boolean
- host_key_auto_add:
- description: "Automatically add host keys to C(~/.ssh/known_hosts)."
- env:
- - name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD
- ini:
- - key: host_key_auto_add
- section: paramiko_connection
- type: boolean
- look_for_keys:
- default: True
- description: "Set to V(false) to disable searching for private key files in C(~/.ssh/)."
- env:
- - name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS
- ini:
- - {key: look_for_keys, section: paramiko_connection}
- type: boolean
- proxy_command:
- default: ""
- description:
- - Proxy information for running the connection via a jumphost.
- type: string
- env:
- - name: ANSIBLE_PARAMIKO_PROXY_COMMAND
- ini:
- - {key: proxy_command, section: paramiko_connection}
- vars:
- - name: ansible_paramiko_proxy_command
- pty:
- default: True
- description: "C(sudo) usually requires a PTY, V(true) to give a PTY and V(false) to not give a PTY."
- env:
- - name: ANSIBLE_PARAMIKO_PTY
- ini:
- - section: paramiko_connection
- key: pty
- type: boolean
- record_host_keys:
- default: True
- description: "Save the host keys to a file."
- env:
- - name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS
- ini:
- - section: paramiko_connection
- key: record_host_keys
- type: boolean
- host_key_checking:
- description: "Set this to V(false) if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host."
- type: boolean
- default: true
- env:
- - name: ANSIBLE_HOST_KEY_CHECKING
- - name: ANSIBLE_SSH_HOST_KEY_CHECKING
- - name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING
- ini:
- - section: defaults
- key: host_key_checking
- - section: paramiko_connection
- key: host_key_checking
- vars:
- - name: ansible_host_key_checking
- - name: ansible_ssh_host_key_checking
- - name: ansible_paramiko_host_key_checking
- use_persistent_connections:
- description: "Toggles the use of persistence for connections."
- type: boolean
- default: False
- env:
- - name: ANSIBLE_USE_PERSISTENT_CONNECTIONS
- ini:
- - section: defaults
- key: use_persistent_connections
- banner_timeout:
- type: float
- default: 30
- description:
- - Configures, in seconds, the amount of time to wait for the SSH
- banner to be presented. This option is supported by paramiko
- version 1.15.0 or newer.
- ini:
- - section: paramiko_connection
- key: banner_timeout
- env:
- - name: ANSIBLE_PARAMIKO_BANNER_TIMEOUT
- timeout:
- type: int
- default: 10
- description: Number of seconds until the plugin gives up on failing to establish a TCP connection.
- ini:
- - section: defaults
- key: timeout
- - section: ssh_connection
- key: timeout
- - section: paramiko_connection
- key: timeout
- env:
- - name: ANSIBLE_TIMEOUT
- - name: ANSIBLE_SSH_TIMEOUT
- - name: ANSIBLE_PARAMIKO_TIMEOUT
- vars:
- - name: ansible_ssh_timeout
- - name: ansible_paramiko_timeout
- cli:
- - name: timeout
- lock_file_timeout:
- type: int
- default: 60
- description: Number of seconds until the plugin gives up on trying to write a lock file when writing SSH known host keys.
- vars:
- - name: ansible_lock_file_timeout
- env:
- - name: ANSIBLE_LOCK_FILE_TIMEOUT
- private_key_file:
- description:
- - Path to private key file to use for authentication.
- type: string
- ini:
- - section: defaults
- key: private_key_file
- - section: paramiko_connection
- key: private_key_file
- env:
- - name: ANSIBLE_PRIVATE_KEY_FILE
- - name: ANSIBLE_PARAMIKO_PRIVATE_KEY_FILE
- vars:
- - name: ansible_private_key_file
- - name: ansible_ssh_private_key_file
- - name: ansible_paramiko_private_key_file
- cli:
- - name: private_key_file
- option: "--private-key"
- vmid:
- description:
- - LXC Container ID
- type: int
- vars:
- - name: proxmox_vmid
- proxmox_become_method:
- description:
- - Become command used in proxmox
- type: str
- default: sudo
- vars:
- - name: proxmox_become_method
-notes:
- - >
- When NOT using this plugin as root, you need to have a become mechanism,
- e.g. C(sudo), installed on Proxmox and setup so we can run it without prompting for the password.
- Inside the container, we need a shell, for example C(sh) and the C(cat) command to be available in the C(PATH) for this plugin to work.
-"""
-
-EXAMPLES = r"""
-# --------------------------------------------------------------
-# Setup sudo with password less access to pct for user 'ansible':
-# --------------------------------------------------------------
-#
-# Open a Proxmox root shell and execute:
-# $ useradd -d /opt/ansible-pct -r -m -s /bin/sh ansible
-# $ mkdir -p /opt/ansible-pct/.ssh
-# $ ssh-keygen -t ed25519 -C 'ansible' -N "" -f /opt/ansible-pct/.ssh/ansible <<< y > /dev/null
-# $ cat /opt/ansible-pct/.ssh/ansible
-# $ mv /opt/ansible-pct/.ssh/ansible.pub /opt/ansible-pct/.ssh/authorized-keys
-# $ rm /opt/ansible-pct/.ssh/ansible*
-# $ chown -R ansible:ansible /opt/ansible-pct/.ssh
-# $ chmod 700 /opt/ansible-pct/.ssh
-# $ chmod 600 /opt/ansible-pct/.ssh/authorized-keys
-# $ echo 'ansible ALL = (root) NOPASSWD: /usr/sbin/pct' > /etc/sudoers.d/ansible_pct
-#
-# Save the displayed private key and add it to your ssh-agent
-#
-# Or use ansible:
-# ---
-# - name: Setup ansible-pct user and configure environment on Proxmox host
-# hosts: proxmox
-# become: true
-# gather_facts: false
-#
-# tasks:
-# - name: Create ansible user
-# ansible.builtin.user:
-# name: ansible
-# comment: Ansible User
-# home: /opt/ansible-pct
-# shell: /bin/sh
-# create_home: true
-# system: true
-#
-# - name: Create .ssh directory
-# ansible.builtin.file:
-# path: /opt/ansible-pct/.ssh
-# state: directory
-# owner: ansible
-# group: ansible
-# mode: '0700'
-#
-# - name: Generate SSH key for ansible user
-# community.crypto.openssh_keypair:
-# path: /opt/ansible-pct/.ssh/ansible
-# type: ed25519
-# comment: 'ansible'
-# force: true
-# mode: '0600'
-# owner: ansible
-# group: ansible
-#
-# - name: Set public key as authorized key
-# ansible.builtin.copy:
-# src: /opt/ansible-pct/.ssh/ansible.pub
-# dest: /opt/ansible-pct/.ssh/authorized-keys
-# remote_src: yes
-# owner: ansible
-# group: ansible
-# mode: '0600'
-#
-# - name: Add sudoers entry for ansible user
-# ansible.builtin.copy:
-# content: 'ansible ALL = (root) NOPASSWD: /usr/sbin/pct'
-# dest: /etc/sudoers.d/ansible_pct
-# owner: root
-# group: root
-# mode: '0440'
-#
-# - name: Fetch private SSH key to localhost
-# ansible.builtin.fetch:
-# src: /opt/ansible-pct/.ssh/ansible
-# dest: ~/.ssh/proxmox_ansible_private_key
-# flat: yes
-# fail_on_missing: true
-#
-# - name: Clean up generated SSH keys
-# ansible.builtin.file:
-# path: /opt/ansible-pct/.ssh/ansible*
-# state: absent
-#
-# - name: Configure private key permissions on localhost
-# hosts: localhost
-# tasks:
-# - name: Set permissions for fetched private key
-# ansible.builtin.file:
-# path: ~/.ssh/proxmox_ansible_private_key
-# mode: '0600'
-#
-# --------------------------------
-# Static inventory file: hosts.yml
-# --------------------------------
-# all:
-# children:
-# lxc:
-# hosts:
-# container-1:
-# ansible_host: 10.0.0.10
-# proxmox_vmid: 100
-# ansible_connection: community.general.proxmox_pct_remote
-# ansible_user: ansible
-# container-2:
-# ansible_host: 10.0.0.10
-# proxmox_vmid: 200
-# ansible_connection: community.general.proxmox_pct_remote
-# ansible_user: ansible
-# proxmox:
-# hosts:
-# proxmox-1:
-# ansible_host: 10.0.0.10
-#
-#
-# ---------------------------------------------
-# Dynamic inventory file: inventory.proxmox.yml
-# ---------------------------------------------
-# plugin: community.general.proxmox
-# url: https://10.0.0.10:8006
-# validate_certs: false
-# user: ansible@pam
-# token_id: ansible
-# token_secret: !vault |
-# $ANSIBLE_VAULT;1.1;AES256
-# ...
-
-# want_facts: true
-# exclude_nodes: true
-# filters:
-# - proxmox_vmtype == "lxc"
-# want_proxmox_nodes_ansible_host: false
-# compose:
-# ansible_host: "'10.0.0.10'"
-# ansible_connection: "'community.general.proxmox_pct_remote'"
-# ansible_user: "'ansible'"
-#
-#
-# ----------------------
-# Playbook: playbook.yml
-# ----------------------
----
-- hosts: lxc
- # On nodes with many containers you might want to deactivate the devices facts
- # or set `gather_facts: false` if you don't need them.
- # More info on gathering fact subsets:
- # https://docs.ansible.com/ansible/latest/collections/ansible/builtin/setup_module.html
- #
- # gather_facts: true
- # gather_subset:
- # - "!devices"
- tasks:
- - name: Ping LXC container
- ansible.builtin.ping:
-"""
-
-import os
-import pathlib
-import socket
-import tempfile
-import typing as t
-
-from ansible.errors import (
- AnsibleAuthenticationFailure,
- AnsibleConnectionFailure,
- AnsibleError,
-)
-from ansible_collections.community.general.plugins.module_utils._filelock import FileLock, LockTimeout
-from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
-from ansible.module_utils.compat.paramiko import PARAMIKO_IMPORT_ERR, paramiko
-from ansible.module_utils.compat.version import LooseVersion
-from ansible.plugins.connection import ConnectionBase
-from ansible.utils.display import Display
-from ansible.utils.path import makedirs_safe
-from binascii import hexlify
-
-
-display = Display()
-
-
-def authenticity_msg(hostname: str, ktype: str, fingerprint: str) -> str:
- msg = f"""
- paramiko: The authenticity of host '{hostname}' can't be established.
- The {ktype} key fingerprint is {fingerprint}.
- Are you sure you want to continue connecting (yes/no)?
- """
- return msg
-
-
-MissingHostKeyPolicy: type = object
-if paramiko:
- MissingHostKeyPolicy = paramiko.MissingHostKeyPolicy
-
-
-class MyAddPolicy(MissingHostKeyPolicy):
- """
- Based on AutoAddPolicy in paramiko so we can determine when keys are added
-
- and also prompt for input.
-
- Policy for automatically adding the hostname and new host key to the
- local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
- """
-
- def __init__(self, connection: Connection) -> None:
- self.connection = connection
- self._options = connection._options
-
- def missing_host_key(self, client, hostname, key) -> None:
-
- if all((self.connection.get_option('host_key_checking'), not self.connection.get_option('host_key_auto_add'))):
-
- fingerprint = hexlify(key.get_fingerprint())
- ktype = key.get_name()
-
- if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence:
- # don't print the prompt string since the user cannot respond
- # to the question anyway
- raise AnsibleError(authenticity_msg(hostname, ktype, fingerprint)[1:92])
-
- inp = to_text(
- display.prompt_until(authenticity_msg(hostname, ktype, fingerprint), private=False),
- errors='surrogate_or_strict'
- )
-
- if inp.lower() not in ['yes', 'y', '']:
- raise AnsibleError('host connection rejected by user')
-
- key._added_by_ansible_this_time = True
-
- # existing implementation below:
- client._host_keys.add(hostname, key.get_name(), key)
-
- # host keys are actually saved in close() function below
- # in order to control ordering.
-
-
-class Connection(ConnectionBase):
- """ SSH based connections (paramiko) to Proxmox pct """
-
- transport = 'community.general.proxmox_pct_remote'
- _log_channel: str | None = None
-
- def __init__(self, play_context, new_stdin, *args, **kwargs):
- super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
-
- def _set_log_channel(self, name: str) -> None:
- """ Mimic paramiko.SSHClient.set_log_channel """
- self._log_channel = name
-
- def _parse_proxy_command(self, port: int = 22) -> dict[str, t.Any]:
- proxy_command = self.get_option('proxy_command') or None
-
- sock_kwarg = {}
- if proxy_command:
- replacers = {
- '%h': self.get_option('remote_addr'),
- '%p': port,
- '%r': self.get_option('remote_user')
- }
- for find, replace in replacers.items():
- proxy_command = proxy_command.replace(find, str(replace))
- try:
- sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)}
- display.vvv(f'CONFIGURE PROXY COMMAND FOR CONNECTION: {proxy_command}', host=self.get_option('remote_addr'))
- except AttributeError:
- display.warning('Paramiko ProxyCommand support unavailable. '
- 'Please upgrade to Paramiko 1.9.0 or newer. '
- 'Not using configured ProxyCommand')
-
- return sock_kwarg
-
- def _connect(self) -> Connection:
- """ activates the connection object """
-
- if paramiko is None:
- raise AnsibleError(f'paramiko is not installed: {to_native(PARAMIKO_IMPORT_ERR)}')
-
- port = self.get_option('port')
- display.vvv(f'ESTABLISH PARAMIKO SSH CONNECTION FOR USER: {self.get_option("remote_user")} on PORT {to_text(port)} TO {self.get_option("remote_addr")}',
- host=self.get_option('remote_addr'))
-
- ssh = paramiko.SSHClient()
-
- # Set pubkey and hostkey algorithms to disable, the only manipulation allowed currently
- # is keeping or omitting rsa-sha2 algorithms
- # default_keys: t.Tuple[str] = ()
- paramiko_preferred_pubkeys = getattr(paramiko.Transport, '_preferred_pubkeys', ())
- paramiko_preferred_hostkeys = getattr(paramiko.Transport, '_preferred_keys', ())
- use_rsa_sha2_algorithms = self.get_option('use_rsa_sha2_algorithms')
- disabled_algorithms: t.Dict[str, t.Iterable[str]] = {}
- if not use_rsa_sha2_algorithms:
- if paramiko_preferred_pubkeys:
- disabled_algorithms['pubkeys'] = tuple(a for a in paramiko_preferred_pubkeys if 'rsa-sha2' in a)
- if paramiko_preferred_hostkeys:
- disabled_algorithms['keys'] = tuple(a for a in paramiko_preferred_hostkeys if 'rsa-sha2' in a)
-
- # override paramiko's default logger name
- if self._log_channel is not None:
- ssh.set_log_channel(self._log_channel)
-
- self.keyfile = os.path.expanduser('~/.ssh/known_hosts')
-
- if self.get_option('host_key_checking'):
- for ssh_known_hosts in ('/etc/ssh/ssh_known_hosts', '/etc/openssh/ssh_known_hosts'):
- try:
- ssh.load_system_host_keys(ssh_known_hosts)
- break
- except IOError:
- pass # file was not found, but not required to function
- except paramiko.hostkeys.InvalidHostKey as e:
- raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}')
- try:
- ssh.load_system_host_keys()
- except paramiko.hostkeys.InvalidHostKey as e:
- raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}')
-
- ssh_connect_kwargs = self._parse_proxy_command(port)
- ssh.set_missing_host_key_policy(MyAddPolicy(self))
- conn_password = self.get_option('password')
- allow_agent = True
-
- if conn_password is not None:
- allow_agent = False
-
- try:
- key_filename = None
- if self.get_option('private_key_file'):
- key_filename = os.path.expanduser(self.get_option('private_key_file'))
-
- # paramiko 2.2 introduced auth_timeout parameter
- if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'):
- ssh_connect_kwargs['auth_timeout'] = self.get_option('timeout')
-
- # paramiko 1.15 introduced banner timeout parameter
- if LooseVersion(paramiko.__version__) >= LooseVersion('1.15.0'):
- ssh_connect_kwargs['banner_timeout'] = self.get_option('banner_timeout')
-
- ssh.connect(
- self.get_option('remote_addr').lower(),
- username=self.get_option('remote_user'),
- allow_agent=allow_agent,
- look_for_keys=self.get_option('look_for_keys'),
- key_filename=key_filename,
- password=conn_password,
- timeout=self.get_option('timeout'),
- port=port,
- disabled_algorithms=disabled_algorithms,
- **ssh_connect_kwargs,
- )
- except paramiko.ssh_exception.BadHostKeyException as e:
- raise AnsibleConnectionFailure(f'host key mismatch for {to_text(e.hostname)}')
- except paramiko.ssh_exception.AuthenticationException as e:
- msg = f'Failed to authenticate: {e}'
- raise AnsibleAuthenticationFailure(msg)
- except Exception as e:
- msg = to_text(e)
- if u'PID check failed' in msg:
- raise AnsibleError('paramiko version issue, please upgrade paramiko on the machine running ansible')
- elif u'Private key file is encrypted' in msg:
- msg = f'ssh {self.get_option("remote_user")}@{self.get_options("remote_addr")}:{port} : ' + \
- f'{msg}\nTo connect as a different user, use -u .'
- raise AnsibleConnectionFailure(msg)
- else:
- raise AnsibleConnectionFailure(msg)
- self.ssh = ssh
- self._connected = True
- return self
-
- def _any_keys_added(self) -> bool:
- for hostname, keys in self.ssh._host_keys.items():
- for keytype, key in keys.items():
- added_this_time = getattr(key, '_added_by_ansible_this_time', False)
- if added_this_time:
- return True
- return False
-
- def _save_ssh_host_keys(self, filename: str) -> None:
- """
- not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
- don't complain about it :)
- """
-
- if not self._any_keys_added():
- return
-
- path = os.path.expanduser('~/.ssh')
- makedirs_safe(path)
-
- with open(filename, 'w') as f:
- for hostname, keys in self.ssh._host_keys.items():
- for keytype, key in keys.items():
- # was f.write
- added_this_time = getattr(key, '_added_by_ansible_this_time', False)
- if not added_this_time:
- f.write(f'{hostname} {keytype} {key.get_base64()}\n')
-
- for hostname, keys in self.ssh._host_keys.items():
- for keytype, key in keys.items():
- added_this_time = getattr(key, '_added_by_ansible_this_time', False)
- if added_this_time:
- f.write(f'{hostname} {keytype} {key.get_base64()}\n')
-
- def _build_pct_command(self, cmd: str) -> str:
- cmd = ['/usr/sbin/pct', 'exec', str(self.get_option('vmid')), '--', cmd]
- if self.get_option('remote_user') != 'root':
- cmd = [self.get_option('proxmox_become_method')] + cmd
- display.vvv(f'INFO Running as non root user: {self.get_option("remote_user")}, trying to run pct with become method: ' +
- f'{self.get_option("proxmox_become_method")}',
- host=self.get_option('remote_addr'))
- return ' '.join(cmd)
-
- def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
- """ run a command on inside the LXC container """
-
- cmd = self._build_pct_command(cmd)
-
- super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
-
- bufsize = 4096
-
- try:
- self.ssh.get_transport().set_keepalive(5)
- chan = self.ssh.get_transport().open_session()
- except Exception as e:
- text_e = to_text(e)
- msg = 'Failed to open session'
- if text_e:
- msg += f': {text_e}'
- raise AnsibleConnectionFailure(to_native(msg))
-
- # sudo usually requires a PTY (cf. requiretty option), therefore
- # we give it one by default (pty=True in ansible.cfg), and we try
- # to initialise from the calling environment when sudoable is enabled
- if self.get_option('pty') and sudoable:
- chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0)))
-
- display.vvv(f'EXEC {cmd}', host=self.get_option('remote_addr'))
-
- cmd = to_bytes(cmd, errors='surrogate_or_strict')
-
- no_prompt_out = b''
- no_prompt_err = b''
- become_output = b''
-
- try:
- chan.exec_command(cmd)
- if self.become and self.become.expect_prompt():
- password_prompt = False
- become_success = False
- while not (become_success or password_prompt):
- display.debug('Waiting for Privilege Escalation input')
-
- chunk = chan.recv(bufsize)
- display.debug(f'chunk is: {to_text(chunk)}')
- if not chunk:
- if b'unknown user' in become_output:
- n_become_user = to_native(self.become.get_option('become_user'))
- raise AnsibleError(f'user {n_become_user} does not exist')
- else:
- break
- # raise AnsibleError('ssh connection closed waiting for password prompt')
- become_output += chunk
-
- # need to check every line because we might get lectured
- # and we might get the middle of a line in a chunk
- for line in become_output.splitlines(True):
- if self.become.check_success(line):
- become_success = True
- break
- elif self.become.check_password_prompt(line):
- password_prompt = True
- break
-
- if password_prompt:
- if self.become:
- become_pass = self.become.get_option('become_pass')
- chan.sendall(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
- else:
- raise AnsibleError('A password is required but none was supplied')
- else:
- no_prompt_out += become_output
- no_prompt_err += become_output
-
- if in_data:
- for i in range(0, len(in_data), bufsize):
- chan.send(in_data[i:i + bufsize])
- chan.shutdown_write()
- elif in_data == b'':
- chan.shutdown_write()
-
- except socket.timeout:
- raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + to_text(become_output))
-
- stdout = b''.join(chan.makefile('rb', bufsize))
- stderr = b''.join(chan.makefile_stderr('rb', bufsize))
- returncode = chan.recv_exit_status()
-
- if 'pct: not found' in stderr.decode('utf-8'):
- raise AnsibleError(
- f'pct not found in path of host: {to_text(self.get_option("remote_addr"))}')
-
- return (returncode, no_prompt_out + stdout, no_prompt_out + stderr)
-
- def put_file(self, in_path: str, out_path: str) -> None:
- """ transfer a file from local to remote """
-
- display.vvv(f'PUT {in_path} TO {out_path}', host=self.get_option('remote_addr'))
- try:
- with open(in_path, 'rb') as f:
- data = f.read()
- returncode, stdout, stderr = self.exec_command(
- ' '.join([
- self._shell.executable, '-c',
- self._shell.quote(f'cat > {out_path}')]),
- in_data=data,
- sudoable=False)
- if returncode != 0:
- if 'cat: not found' in stderr.decode('utf-8'):
- raise AnsibleError(
- f'cat not found in path of container: {to_text(self.get_option("vmid"))}')
- raise AnsibleError(
- f'{to_text(stdout)}\n{to_text(stderr)}')
- except Exception as e:
- raise AnsibleError(
- f'error occurred while putting file from {in_path} to {out_path}!\n{to_text(e)}')
-
- def fetch_file(self, in_path: str, out_path: str) -> None:
- """ save a remote file to the specified path """
-
- display.vvv(f'FETCH {in_path} TO {out_path}', host=self.get_option('remote_addr'))
- try:
- returncode, stdout, stderr = self.exec_command(
- ' '.join([
- self._shell.executable, '-c',
- self._shell.quote(f'cat {in_path}')]),
- sudoable=False)
- if returncode != 0:
- if 'cat: not found' in stderr.decode('utf-8'):
- raise AnsibleError(
- f'cat not found in path of container: {to_text(self.get_option("vmid"))}')
- raise AnsibleError(
- f'{to_text(stdout)}\n{to_text(stderr)}')
- with open(out_path, 'wb') as f:
- f.write(stdout)
- except Exception as e:
- raise AnsibleError(
- f'error occurred while fetching file from {in_path} to {out_path}!\n{to_text(e)}')
-
- def reset(self) -> None:
- """ reset the connection """
-
- if not self._connected:
- return
- self.close()
- self._connect()
-
- def close(self) -> None:
- """ terminate the connection """
-
- if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added():
- # add any new SSH host keys -- warning -- this could be slow
- # (This doesn't acquire the connection lock because it needs
- # to exclude only other known_hosts writers, not connections
- # that are starting up.)
- lockfile = os.path.basename(self.keyfile)
- dirname = os.path.dirname(self.keyfile)
- makedirs_safe(dirname)
- tmp_keyfile_name = None
- try:
- with FileLock().lock_file(lockfile, dirname, self.get_option('lock_file_timeout')):
- # just in case any were added recently
-
- self.ssh.load_system_host_keys()
- self.ssh._host_keys.update(self.ssh._system_host_keys)
-
- # gather information about the current key file, so
- # we can ensure the new file has the correct mode/owner
-
- key_dir = os.path.dirname(self.keyfile)
- if os.path.exists(self.keyfile):
- key_stat = os.stat(self.keyfile)
- mode = key_stat.st_mode & 0o777
- uid = key_stat.st_uid
- gid = key_stat.st_gid
- else:
- mode = 0o644
- uid = os.getuid()
- gid = os.getgid()
-
- # Save the new keys to a temporary file and move it into place
- # rather than rewriting the file. We set delete=False because
- # the file will be moved into place rather than cleaned up.
-
- with tempfile.NamedTemporaryFile(dir=key_dir, delete=False) as tmp_keyfile:
- tmp_keyfile_name = tmp_keyfile.name
- os.chmod(tmp_keyfile_name, mode)
- os.chown(tmp_keyfile_name, uid, gid)
- self._save_ssh_host_keys(tmp_keyfile_name)
-
- os.rename(tmp_keyfile_name, self.keyfile)
- except LockTimeout:
- raise AnsibleError(
- f'writing lock file for {self.keyfile} ran in to the timeout of {self.get_option("lock_file_timeout")}s')
- except paramiko.hostkeys.InvalidHostKey as e:
- raise AnsibleConnectionFailure(f'Invalid host key: {e.line}')
- except Exception as e:
- # unable to save keys, including scenario when key was invalid
- # and caught earlier
- raise AnsibleError(
- f'error occurred while writing SSH host keys!\n{to_text(e)}')
- finally:
- if tmp_keyfile_name is not None:
- pathlib.Path(tmp_keyfile_name).unlink(missing_ok=True)
-
- self.ssh.close()
- self._connected = False
diff --git a/plugins/connection/wsl.py b/plugins/connection/wsl.py
index 886ac5c60f..92ffec52b3 100644
--- a/plugins/connection/wsl.py
+++ b/plugins/connection/wsl.py
@@ -11,11 +11,11 @@ from __future__ import annotations
DOCUMENTATION = r"""
author: Rui Lopes (@rgl)
name: wsl
-short_description: Run tasks in WSL distribution using wsl.exe CLI via SSH
+short_description: Run tasks in WSL distribution using wsl.exe CLI using SSH
requirements:
- paramiko
description:
- - Run commands or put/fetch files to an existing WSL distribution using wsl.exe CLI via SSH.
+ - Run commands or put/fetch files to an existing WSL distribution using wsl.exe CLI using SSH.
- Uses the Python SSH implementation (Paramiko) to connect to the WSL host.
version_added: "10.6.0"
options:
@@ -50,7 +50,7 @@ options:
remote_user:
description:
- User to login/authenticate as.
- - Can be set from the CLI via the C(--user) or C(-u) options.
+ - Can be set from the CLI with the C(--user) or C(-u) options.
type: string
vars:
- name: ansible_user
@@ -69,7 +69,7 @@ options:
password:
description:
- Secret used to either login the SSH server or as a passphrase for SSH keys that require it.
- - Can be set from the CLI via the C(--ask-pass) option.
+ - Can be set from the CLI with the C(--ask-pass) option.
type: string
vars:
- name: ansible_password
@@ -99,7 +99,7 @@ options:
section: paramiko_connection
type: boolean
look_for_keys:
- default: True
+ default: true
description: "Set to V(false) to disable searching for private key files in C(~/.ssh/)."
env:
- name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS
@@ -109,7 +109,7 @@ options:
proxy_command:
default: ""
description:
- - Proxy information for running the connection via a jumphost.
+ - Proxy information for running the connection through a jumphost.
- This option is supported by paramiko version 1.9.0 or newer.
type: string
env:
@@ -119,7 +119,7 @@ options:
vars:
- name: ansible_paramiko_proxy_command
record_host_keys:
- default: True
+ default: true
description: "Save the host keys to a file."
env:
- name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS
@@ -128,7 +128,8 @@ options:
key: record_host_keys
type: boolean
host_key_checking:
- description: "Set this to V(false) if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host."
+ description: "Set this to V(false) if you want to avoid host key checking by the underlying tools Ansible uses to connect
+ to the host."
type: boolean
default: true
env:
@@ -147,7 +148,7 @@ options:
use_persistent_connections:
description: "Toggles the use of persistence for connections."
type: boolean
- default: False
+ default: false
env:
- name: ANSIBLE_USE_PERSISTENT_CONNECTIONS
ini:
@@ -157,8 +158,7 @@ options:
type: float
default: 30
description:
- - Configures, in seconds, the amount of time to wait for the SSH
- banner to be presented.
+ - Configures, in seconds, the amount of time to wait for the SSH banner to be presented.
- This option is supported by paramiko version 1.15.0 or newer.
ini:
- section: paramiko_connection
@@ -227,20 +227,20 @@ options:
- name: ansible_paramiko_user_known_hosts_file
wsl_distribution:
description:
- - WSL distribution name
+ - WSL distribution name.
type: string
required: true
vars:
- name: wsl_distribution
wsl_user:
description:
- - WSL distribution user
+ - WSL distribution user.
type: string
vars:
- name: wsl_user
become_user:
description:
- - WSL distribution user
+ - WSL distribution user.
type: string
default: root
vars:
@@ -248,7 +248,7 @@ options:
- name: ansible_become_user
become:
description:
- - whether to use the user defined by ansible_become_user.
+ - Whether to use the user defined by O(become_user).
type: bool
default: false
vars:
@@ -315,6 +315,7 @@ import pathlib
import shlex
import socket
import tempfile
+import traceback
import typing as t
from ansible.errors import (
@@ -323,9 +324,8 @@ from ansible.errors import (
AnsibleError,
)
from ansible_collections.community.general.plugins.module_utils._filelock import FileLock, LockTimeout
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
-from ansible.module_utils.compat.paramiko import PARAMIKO_IMPORT_ERR, paramiko
-from ansible.module_utils.compat.version import LooseVersion
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display
@@ -333,8 +333,15 @@ from ansible.utils.path import makedirs_safe
from binascii import hexlify
from subprocess import list2cmdline
+try:
+ import paramiko
+ PARAMIKO_IMPORT_ERR = None
+except ImportError:
+ paramiko = None
+ PARAMIKO_IMPORT_ERR = traceback.format_exc()
-if t.TYPE_CHECKING and paramiko:
+
+if t.TYPE_CHECKING and PARAMIKO_IMPORT_ERR is None:
from paramiko import MissingHostKeyPolicy
from paramiko.client import SSHClient
from paramiko.pkey import PKey
@@ -437,7 +444,7 @@ class Connection(ConnectionBase):
def _connect(self) -> Connection:
""" activates the connection object """
- if paramiko is None:
+ if PARAMIKO_IMPORT_ERR is not None:
raise AnsibleError(f'paramiko is not installed: {to_native(PARAMIKO_IMPORT_ERR)}')
port = self.get_option('port')
@@ -522,8 +529,10 @@ class Connection(ConnectionBase):
if u'PID check failed' in msg:
raise AnsibleError('paramiko version issue, please upgrade paramiko on the machine running ansible')
elif u'Private key file is encrypted' in msg:
- msg = f'ssh {self.get_option("remote_user")}@{self.get_options("remote_addr")}:{port} : ' + \
+ msg = (
+ f'ssh {self.get_option("remote_user")}@{self.get_option("remote_addr")}:{port} : '
f'{msg}\nTo connect as a different user, use -u .'
+ )
raise AnsibleConnectionFailure(msg)
else:
raise AnsibleConnectionFailure(msg)
@@ -656,7 +665,7 @@ class Connection(ConnectionBase):
chan.shutdown_write()
except socket.timeout:
- raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + to_text(become_output))
+ raise AnsibleError(f'ssh timed out waiting for privilege escalation.\n{to_text(become_output)}')
stdout = b''.join(chan.makefile('rb', bufsize))
stderr = b''.join(chan.makefile_stderr('rb', bufsize))
diff --git a/plugins/doc_fragments/alicloud.py b/plugins/doc_fragments/alicloud.py
index 3b810852b7..cf7255b465 100644
--- a/plugins/doc_fragments/alicloud.py
+++ b/plugins/doc_fragments/alicloud.py
@@ -16,31 +16,31 @@ options:
alicloud_access_key:
description:
- Alibaba Cloud access key. If not set then the value of environment variable E(ALICLOUD_ACCESS_KEY), E(ALICLOUD_ACCESS_KEY_ID)
- will be used instead.
+ is used instead.
aliases: ['access_key_id', 'access_key']
type: str
alicloud_secret_key:
description:
- Alibaba Cloud secret key. If not set then the value of environment variable E(ALICLOUD_SECRET_KEY), E(ALICLOUD_SECRET_ACCESS_KEY)
- will be used instead.
+ is used instead.
aliases: ['secret_access_key', 'secret_key']
type: str
alicloud_region:
description:
- The Alibaba Cloud region to use. If not specified then the value of environment variable E(ALICLOUD_REGION), E(ALICLOUD_REGION_ID)
- will be used instead.
+ is used instead.
aliases: ['region', 'region_id']
required: true
type: str
alicloud_security_token:
description:
- The Alibaba Cloud security token. If not specified then the value of environment variable E(ALICLOUD_SECURITY_TOKEN)
- will be used instead.
+ is used instead.
aliases: ['security_token']
type: str
alicloud_assume_role:
description:
- - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials.
+ - If provided with a role ARN, Ansible attempts to assume this role using the supplied credentials.
- The nested assume_role block supports C(alicloud_assume_role_arn), C(alicloud_assume_role_session_name), C(alicloud_assume_role_session_expiration)
and C(alicloud_assume_role_policy).
type: dict
@@ -48,7 +48,7 @@ options:
alicloud_assume_role_arn:
description:
- The Alibaba Cloud C(role_arn). The ARN of the role to assume. If ARN is set to an empty string, it does not perform
- role switching. It supports environment variable E(ALICLOUD_ASSUME_ROLE_ARN). ansible will execute with provided credentials.
+ role switching. It supports environment variable E(ALICLOUD_ASSUME_ROLE_ARN). Ansible executes with provided credentials.
aliases: ['assume_role_arn']
type: str
alicloud_assume_role_session_name:
@@ -68,7 +68,7 @@ options:
description:
- The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control' section
of the Alibaba Cloud console.
- - If you are running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible will just access the metadata
+ - If you are running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible just accesses the metadata
U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS credential.
This is a preferred approach over any other when running in ECS as you can avoid hard coding credentials. Instead
these are leased on-the-fly by Ansible which reduces the chance of leakage.
@@ -83,7 +83,7 @@ options:
description:
- This is the path to the shared credentials file. It can also be sourced from the E(ALICLOUD_SHARED_CREDENTIALS_FILE)
environment variable.
- - If this is not set and a profile is specified, C(~/.aliyun/config.json) will be used.
+ - If this is not set and a profile is specified, C(~/.aliyun/config.json) is used.
type: str
author:
- "He Guimin (@xiaozhu36)"
diff --git a/plugins/doc_fragments/attributes.py b/plugins/doc_fragments/attributes.py
index 2ab083eab2..18b02575c4 100644
--- a/plugins/doc_fragments/attributes.py
+++ b/plugins/doc_fragments/attributes.py
@@ -17,7 +17,7 @@ attributes:
check_mode:
description: Can run in C(check_mode) and return changed status prediction without modifying target.
diff_mode:
- description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
+ description: Returns details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
"""
PLATFORM = r"""
@@ -32,14 +32,14 @@ attributes:
INFO_MODULE = r'''
options: {}
attributes:
- check_mode:
- support: full
- details:
- - This action does not modify state.
- diff_mode:
- support: N/A
- details:
- - This action does not modify state.
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
'''
CONN = r"""
@@ -57,23 +57,23 @@ attributes:
options: {}
attributes:
facts:
- description: Action returns an C(ansible_facts) dictionary that will update existing host facts.
+ description: Action returns an C(ansible_facts) dictionary that updates existing host facts.
"""
# Should be used together with the standard fragment and the FACTS fragment
FACTS_MODULE = r'''
options: {}
attributes:
- check_mode:
- support: full
- details:
- - This action does not modify state.
- diff_mode:
- support: N/A
- details:
- - This action does not modify state.
- facts:
- support: full
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+ facts:
+ support: full
'''
FILES = r"""
diff --git a/plugins/doc_fragments/bitbucket.py b/plugins/doc_fragments/bitbucket.py
index e8b9ea4df8..65c4c47b51 100644
--- a/plugins/doc_fragments/bitbucket.py
+++ b/plugins/doc_fragments/bitbucket.py
@@ -16,17 +16,17 @@ options:
client_id:
description:
- The OAuth consumer key.
- - If not set the environment variable E(BITBUCKET_CLIENT_ID) will be used.
+ - If not set the environment variable E(BITBUCKET_CLIENT_ID) is used.
type: str
client_secret:
description:
- The OAuth consumer secret.
- - If not set the environment variable E(BITBUCKET_CLIENT_SECRET) will be used.
+ - If not set the environment variable E(BITBUCKET_CLIENT_SECRET) is used.
type: str
user:
description:
- The username.
- - If not set the environment variable E(BITBUCKET_USERNAME) will be used.
+ - If not set the environment variable E(BITBUCKET_USERNAME) is used.
- O(ignore:username) is an alias of O(user) since community.general 6.0.0. It was an alias of O(workspace) before.
type: str
version_added: 4.0.0
@@ -34,7 +34,7 @@ options:
password:
description:
- The App password.
- - If not set the environment variable E(BITBUCKET_PASSWORD) will be used.
+ - If not set the environment variable E(BITBUCKET_PASSWORD) is used.
type: str
version_added: 4.0.0
notes:
diff --git a/plugins/doc_fragments/clc.py b/plugins/doc_fragments/clc.py
deleted file mode 100644
index e193033af9..0000000000
--- a/plugins/doc_fragments/clc.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2024, Alexei Znamensky
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard documentation fragment
- DOCUMENTATION = r"""
-options: {}
-requirements:
- - requests >= 2.5.0
- - clc-sdk
-notes:
- - To use this module, it is required to set the below environment variables which enables access to the Centurylink Cloud.
- - E(CLC_V2_API_USERNAME), the account login ID for the Centurylink Cloud.
- - E(CLC_V2_API_PASSWORD), the account password for the Centurylink Cloud.
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the CLC account
- login and password using the HTTP API call @ U(https://api.ctl.io/v2/authentication/login).
- - E(CLC_V2_API_TOKEN), the API token generated from U(https://api.ctl.io/v2/authentication/login).
- - E(CLC_ACCT_ALIAS), the account alias associated with the Centurylink Cloud.
- - Users can set E(CLC_V2_API_URL) to specify an endpoint for pointing to a different CLC environment.
-"""
diff --git a/plugins/doc_fragments/dimensiondata.py b/plugins/doc_fragments/dimensiondata.py
index ece97addf0..890c4d741e 100644
--- a/plugins/doc_fragments/dimensiondata.py
+++ b/plugins/doc_fragments/dimensiondata.py
@@ -28,12 +28,12 @@ options:
mcp_user:
description:
- The username used to authenticate to the CloudControl API.
- - If not specified, will fall back to E(MCP_USER) from environment variable or C(~/.dimensiondata).
+ - If not specified, falls back to E(MCP_USER) from environment variable or C(~/.dimensiondata).
type: str
mcp_password:
description:
- The password used to authenticate to the CloudControl API.
- - If not specified, will fall back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
+ - If not specified, falls back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
- Required if O(mcp_user) is specified.
type: str
location:
@@ -43,7 +43,7 @@ options:
required: true
validate_certs:
description:
- - If V(false), SSL certificates will not be validated.
+ - If V(false), SSL certificates are not validated.
- This should only be used on private instances of the CloudControl API that use self-signed certificates.
type: bool
default: true
diff --git a/plugins/doc_fragments/django.py b/plugins/doc_fragments/django.py
index 3dcdb40171..5d01c8323e 100644
--- a/plugins/doc_fragments/django.py
+++ b/plugins/doc_fragments/django.py
@@ -18,13 +18,13 @@ options:
settings:
description:
- Specifies the settings module to use.
- - The value will be passed as is to the C(--settings) argument in C(django-admin).
+ - The value is passed as is to the C(--settings) argument in C(django-admin).
type: str
required: true
pythonpath:
description:
- Adds the given filesystem path to the Python import search path.
- - The value will be passed as is to the C(--pythonpath) argument in C(django-admin).
+ - The value is passed as is to the C(--pythonpath) argument in C(django-admin).
type: path
traceback:
description:
diff --git a/plugins/doc_fragments/emc.py b/plugins/doc_fragments/emc.py
index 7c62285a72..14dc7bc129 100644
--- a/plugins/doc_fragments/emc.py
+++ b/plugins/doc_fragments/emc.py
@@ -13,21 +13,21 @@ class ModuleDocFragment(object):
# Documentation fragment for VNX (emc_vnx)
EMC_VNX = r'''
options:
- sp_address:
- description:
- - Address of the SP of target/secondary storage.
- type: str
- required: true
- sp_user:
- description:
- - Username for accessing SP.
- type: str
- default: sysadmin
- sp_password:
- description:
- - password for accessing SP.
- type: str
- default: sysadmin
+ sp_address:
+ description:
+ - Address of the SP of target/secondary storage.
+ type: str
+ required: true
+ sp_user:
+ description:
+ - Username for accessing SP.
+ type: str
+ default: sysadmin
+ sp_password:
+ description:
+ - Password for accessing SP.
+ type: str
+ default: sysadmin
requirements:
- An EMC VNX Storage device.
- storops (0.5.10 or greater). Install using C(pip install storops).
diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py
index 3d478beb59..ea54c80c09 100644
--- a/plugins/doc_fragments/hwc.py
+++ b/plugins/doc_fragments/hwc.py
@@ -55,5 +55,5 @@ notes:
- For authentication, you can set domain using the E(ANSIBLE_HWC_DOMAIN) environment variable.
- For authentication, you can set project using the E(ANSIBLE_HWC_PROJECT) environment variable.
- For authentication, you can set region using the E(ANSIBLE_HWC_REGION) environment variable.
- - Environment variables values will only be used if the playbook values are not set.
+ - Environment variable values are only used when the playbook values are not set.
"""
diff --git a/plugins/doc_fragments/influxdb.py b/plugins/doc_fragments/influxdb.py
index 9cf47d340a..5dbebea846 100644
--- a/plugins/doc_fragments/influxdb.py
+++ b/plugins/doc_fragments/influxdb.py
@@ -20,13 +20,13 @@ options:
default: localhost
username:
description:
- - Username that will be used to authenticate against InfluxDB server.
+ - Username that is used to authenticate against InfluxDB server.
type: str
default: root
aliases: [login_username]
password:
description:
- - Password that will be used to authenticate against InfluxDB server.
+ - Password that is used to authenticate against InfluxDB server.
type: str
default: root
aliases: [login_password]
@@ -44,7 +44,7 @@ options:
version_added: '0.2.0'
validate_certs:
description:
- - If set to V(false), the SSL certificates will not be validated.
+ - If set to V(false), the SSL certificates are not validated.
- This should only set to V(false) used on personally controlled sites using self-signed certificates.
type: bool
default: true
@@ -55,11 +55,11 @@ options:
default: false
timeout:
description:
- - Number of seconds Requests will wait for client to establish a connection.
+ - Number of seconds Requests waits for client to establish a connection.
type: int
retries:
description:
- - Number of retries client will try before aborting.
+ - Number of retries client performs before aborting.
- V(0) indicates try until success.
- Only available when using C(python-influxdb) >= 4.1.0.
type: int
diff --git a/plugins/doc_fragments/ipa.py b/plugins/doc_fragments/ipa.py
index 0edb947aa5..63ea94b465 100644
--- a/plugins/doc_fragments/ipa.py
+++ b/plugins/doc_fragments/ipa.py
@@ -16,43 +16,43 @@ options:
ipa_port:
description:
- Port of FreeIPA / IPA server.
- - If the value is not specified in the task, the value of environment variable E(IPA_PORT) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PORT) is used instead.
- If both the environment variable E(IPA_PORT) and the value are not specified in the task, then default value is set.
type: int
default: 443
ipa_host:
description:
- IP or hostname of IPA server.
- - If the value is not specified in the task, the value of environment variable E(IPA_HOST) will be used instead.
- - If both the environment variable E(IPA_HOST) and the value are not specified in the task, then DNS will be used to
- try to discover the FreeIPA server.
+ - If the value is not specified in the task, the value of environment variable E(IPA_HOST) is used instead.
+ - If both the environment variable E(IPA_HOST) and the value are not specified in the task, then DNS is used to try
+ to discover the FreeIPA server.
- The relevant entry needed in FreeIPA is the C(ipa-ca) entry.
- If neither the DNS entry, nor the environment E(IPA_HOST), nor the value are available in the task, then the default
- value will be used.
+ value is used.
type: str
default: ipa.example.com
ipa_user:
description:
- Administrative account used on IPA server.
- - If the value is not specified in the task, the value of environment variable E(IPA_USER) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(IPA_USER) is used instead.
- If both the environment variable E(IPA_USER) and the value are not specified in the task, then default value is set.
type: str
default: admin
ipa_pass:
description:
- Password of administrative user.
- - If the value is not specified in the task, the value of environment variable E(IPA_PASS) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PASS) is used instead.
- Note that if the C(urllib_gssapi) library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
- - If the environment variable E(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate
+ - If the environment variable E(KRB5CCNAME) is available, the module uses this Kerberos credentials cache to authenticate
to the FreeIPA server.
- - If the environment variable E(KRB5_CLIENT_KTNAME) is available, and E(KRB5CCNAME) is not; the module will use this
- kerberos keytab to authenticate.
+ - If the environment variable E(KRB5_CLIENT_KTNAME) is available, and E(KRB5CCNAME) is not; the module uses this Kerberos
+ keytab to authenticate.
- If GSSAPI is not available, the usage of O(ipa_pass) is required.
type: str
ipa_prot:
description:
- Protocol used by IPA server.
- - If the value is not specified in the task, the value of environment variable E(IPA_PROT) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(IPA_PROT) is used instead.
- If both the environment variable E(IPA_PROT) and the value are not specified in the task, then default value is set.
type: str
choices: [http, https]
@@ -60,7 +60,7 @@ options:
validate_certs:
description:
- This only applies if O(ipa_prot) is V(https).
- - If set to V(false), the SSL certificates will not be validated.
+ - If set to V(false), the SSL certificates are not validated.
- This should only set to V(false) used on personally controlled sites using self-signed certificates.
type: bool
default: true
@@ -68,7 +68,7 @@ options:
description:
- Specifies idle timeout (in seconds) for the connection.
- For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
- - If the value is not specified in the task, the value of environment variable E(IPA_TIMEOUT) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(IPA_TIMEOUT) is used instead.
- If both the environment variable E(IPA_TIMEOUT) and the value are not specified in the task, then default value is
set.
type: int
diff --git a/plugins/doc_fragments/ldap.py b/plugins/doc_fragments/ldap.py
index 4dd5fd097f..abdb32adb7 100644
--- a/plugins/doc_fragments/ldap.py
+++ b/plugins/doc_fragments/ldap.py
@@ -14,9 +14,9 @@ class ModuleDocFragment(object):
# Standard LDAP documentation fragment
DOCUMENTATION = r"""
notes:
- - The default authentication settings will attempt to use a SASL EXTERNAL bind over a UNIX domain socket. This works well
- with the default Ubuntu install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root
- to modify the server configuration. If you need to use a simple bind to access your server, pass the credentials in O(bind_dn)
+ - The default authentication settings attempt to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with
+ the default Ubuntu install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to
+ modify the server configuration. If you need to use a simple bind to access your server, pass the credentials in O(bind_dn)
and O(bind_pw).
options:
bind_dn:
@@ -76,7 +76,7 @@ options:
default: false
validate_certs:
description:
- - If set to V(false), SSL certificates will not be validated.
+ - If set to V(false), SSL certificates are not validated.
- This should only be used on sites using self-signed certificates.
type: bool
default: true
@@ -90,9 +90,9 @@ options:
xorder_discovery:
description:
- Set the behavior on how to process Xordered DNs.
- - V(enable) will perform a C(ONELEVEL) search below the superior RDN to find the matching DN.
- - V(disable) will always use the DN unmodified (as passed by the O(dn) parameter).
- - V(auto) will only perform a search if the first RDN does not contain an index number (C({x})).
+ - V(enable) performs a C(ONELEVEL) search below the superior RDN to find the matching DN.
+ - V(disable) always uses the DN unmodified (as passed by the O(dn) parameter).
+ - V(auto) only performs a search if the first RDN does not contain an index number (C({x})).
type: str
choices: ['enable', 'auto', 'disable']
default: auto
diff --git a/plugins/doc_fragments/onepassword.py b/plugins/doc_fragments/onepassword.py
index a67c9e4dc1..6fb0e252c6 100644
--- a/plugins/doc_fragments/onepassword.py
+++ b/plugins/doc_fragments/onepassword.py
@@ -18,8 +18,8 @@ options:
aliases: ['vault_password']
type: str
section:
- description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from
- any section.
+ description: Item section containing the field to retrieve (case-insensitive). If absent, returns first match from any
+ section.
domain:
description: Domain of 1Password.
default: '1password.com'
@@ -42,7 +42,7 @@ options:
- Only works with 1Password CLI version 2 or later.
type: str
vault:
- description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
+ description: Vault containing the item to retrieve (case-insensitive). If absent, searches all vaults.
type: str
connect_host:
description: The host for 1Password Connect. Must be used in combination with O(connect_token).
@@ -65,10 +65,9 @@ options:
- name: OP_SERVICE_ACCOUNT_TOKEN
version_added: 8.2.0
notes:
- - This lookup will use an existing 1Password session if one exists. If not, and you have already performed an initial sign
- in (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the O(master_password)
- is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain will be used
- by C(op).
+ - This lookup uses an existing 1Password session if one exists. If not, and you have already performed an initial sign in
+ (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the O(master_password)
+ is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain is used by C(op).
- This lookup can perform an initial login by providing O(subdomain), O(username), O(secret_key), and O(master_password).
- Can target a specific account by providing the O(account_id).
- Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal
diff --git a/plugins/doc_fragments/oneview.py b/plugins/doc_fragments/oneview.py
index 3caabe4512..366e3e3e42 100644
--- a/plugins/doc_fragments/oneview.py
+++ b/plugins/doc_fragments/oneview.py
@@ -17,8 +17,8 @@ options:
description:
- Path to a JSON configuration file containing the OneView client configuration. The configuration file is optional
and when used should be present in the host running the ansible commands. If the file path is not provided, the configuration
- will be loaded from environment variables. For links to example configuration files or how to use the environment
- variables verify the notes section.
+ is loaded from environment variables. For links to example configuration files or how to use the environment variables
+ verify the notes section.
type: path
api_version:
description:
@@ -49,16 +49,16 @@ notes:
U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json).'
- 'Check how to use environment variables for configuration at: U(https://github.com/HewlettPackard/oneview-ansible#environment-variables).'
- 'Additional Playbooks for the HPE OneView Ansible modules can be found at: U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples).'
- - 'The OneView API version used will directly affect returned and expected fields in resources. Information on setting the
- desired API version and can be found at: U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version).'
+ - 'The OneView API version used directly affects returned and expected fields in resources. Information on setting the desired
+ API version can be found at: U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version).'
"""
VALIDATEETAG = r"""
options:
validate_etag:
description:
- - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag for the
- resource matches the ETag provided in the data.
+ - When the ETag Validation is enabled, the request is conditionally processed only if the current ETag for the resource
+ matches the ETag provided in the data.
type: bool
default: true
"""
diff --git a/plugins/doc_fragments/openswitch.py b/plugins/doc_fragments/openswitch.py
index f0e9e87c3d..30b477fbe7 100644
--- a/plugins/doc_fragments/openswitch.py
+++ b/plugins/doc_fragments/openswitch.py
@@ -21,8 +21,8 @@ options:
port:
description:
- Specifies the port to use when building the connection to the remote device. This value applies to either O(transport=cli)
- or O(transport=rest). The port value will default to the appropriate transport common port if none is provided in
- the task. (cli=22, http=80, https=443). Note this argument does not affect the SSH transport.
+ or O(transport=rest). The port value defaults to the appropriate transport common port if none is provided in the
+ task. (cli=22, http=80, https=443). Note this argument does not affect the SSH transport.
type: int
default: 0 (use common port)
username:
@@ -30,25 +30,24 @@ options:
- Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate
either the CLI login or the eAPI authentication depending on which transport is used. Note this argument does not
affect the SSH transport. If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_USERNAME)
- will be used instead.
+ is used instead.
type: str
password:
description:
- Specifies the password to use to authenticate the connection to the remote device. This is a common argument used
for either O(transport=cli) or O(transport=rest). Note this argument does not affect the SSH transport. If the value
- is not specified in the task, the value of environment variable E(ANSIBLE_NET_PASSWORD) will be used instead.
+ is not specified in the task, the value of environment variable E(ANSIBLE_NET_PASSWORD) is used instead.
type: str
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device for either connecting or sending commands.
- If the timeout is exceeded before the operation is completed, the module will error.
+ If the timeout is exceeded before the operation is completed, the module fails.
type: int
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to the remote device. This argument is only used for O(transport=cli).
- If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_SSH_KEYFILE) will be used
- instead.
+ If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_SSH_KEYFILE) is used instead.
type: path
transport:
description:
diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py
index 702b77f02f..08b2948bf3 100644
--- a/plugins/doc_fragments/oracle.py
+++ b/plugins/doc_fragments/oracle.py
@@ -54,9 +54,9 @@ options:
auth_type:
description:
- The type of authentication to use for making API requests. By default O(auth_type=api_key) based authentication is
- performed and the API key (see O(api_user_key_file)) in your config file will be used. If this 'auth_type' module
- option is not specified, the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal)
- to use instance principal based authentication when running ansible playbooks within an OCI compute instance.
+ performed and the API key (see O(api_user_key_file)) in your config file is used. If O(auth_type) is not specified,
+ the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal) to use instance principal
+ based authentication when running Ansible playbooks within an OCI compute instance.
choices: ['api_key', 'instance_principal']
default: 'api_key'
type: str
diff --git a/plugins/doc_fragments/pipx.py b/plugins/doc_fragments/pipx.py
index b94495d4a1..dde13f6dd3 100644
--- a/plugins/doc_fragments/pipx.py
+++ b/plugins/doc_fragments/pipx.py
@@ -13,26 +13,22 @@ class ModuleDocFragment(object):
options:
global:
description:
- - The module will pass the C(--global) argument to C(pipx), to execute actions in global scope.
- - The C(--global) is only available in C(pipx>=1.6.0), so make sure to have a compatible version when using this option.
- Moreover, a nasty bug with C(--global) was fixed in C(pipx==1.7.0), so it is strongly recommended you used that version
- or newer.
+ - The module passes the C(--global) argument to C(pipx), to execute actions in global scope.
type: bool
default: false
executable:
description:
- Path to the C(pipx) installed in the system.
- - If not specified, the module will use C(python -m pipx) to run the tool, using the same Python interpreter as ansible
+ - If not specified, the module uses C(python -m pipx) to run the tool, using the same Python interpreter as ansible
itself.
type: path
+requirements:
+ - This module requires C(pipx) version 1.7.0 or above. Please note that C(pipx) 1.7.0 requires Python 3.8 or above.
notes:
- - This module requires C(pipx) version 0.16.2.1 or above. From community.general 11.0.0 onwards, the module will require
- C(pipx>=1.7.0).
- - Please note that C(pipx) requires Python 3.6 or above.
- This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip).
- This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
- - This module will honor C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed
- using the R(environment Ansible keyword, playbooks_environment).
+ - This module honors C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed using
+ the R(environment Ansible keyword, playbooks_environment).
seealso:
- name: C(pipx) command manual page
description: Manual page for the command.
diff --git a/plugins/doc_fragments/proxmox.py b/plugins/doc_fragments/proxmox.py
deleted file mode 100644
index 4641c36d3e..0000000000
--- a/plugins/doc_fragments/proxmox.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Ansible project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- # Common parameters for Proxmox VE modules
- DOCUMENTATION = r"""
-options:
- api_host:
- description:
- - Specify the target host of the Proxmox VE cluster.
- type: str
- required: true
- api_port:
- description:
- - Specify the target port of the Proxmox VE cluster.
- - Uses the E(PROXMOX_PORT) environment variable if not specified.
- type: int
- required: false
- version_added: 9.1.0
- api_user:
- description:
- - Specify the user to authenticate with.
- type: str
- required: true
- api_password:
- description:
- - Specify the password to authenticate with.
- - You can use E(PROXMOX_PASSWORD) environment variable.
- type: str
- api_token_id:
- description:
- - Specify the token ID.
- - Requires C(proxmoxer>=1.1.0) to work.
- type: str
- version_added: 1.3.0
- api_token_secret:
- description:
- - Specify the token secret.
- - Requires C(proxmoxer>=1.1.0) to work.
- type: str
- version_added: 1.3.0
- validate_certs:
- description:
- - If V(false), SSL certificates will not be validated.
- - This should only be used on personally controlled sites using self-signed certificates.
- type: bool
- default: false
-requirements: ["proxmoxer", "requests"]
-"""
-
- SELECTION = r"""
-options:
- vmid:
- description:
- - Specifies the instance ID.
- - If not set the next available ID will be fetched from ProxmoxAPI.
- type: int
- node:
- description:
- - Proxmox VE node on which to operate.
- - Only required for O(state=present).
- - For every other states it will be autodiscovered.
- type: str
- pool:
- description:
- - Add the new VM to the specified pool.
- type: str
-"""
-
- ACTIONGROUP_PROXMOX = r"""
-options: {}
-attributes:
- action_group:
- description: Use C(group/community.general.proxmox) in C(module_defaults) to set defaults for this module.
- support: full
- membership:
- - community.general.proxmox
-"""
diff --git a/plugins/doc_fragments/redfish.py b/plugins/doc_fragments/redfish.py
index f2e6b37485..a20e064988 100644
--- a/plugins/doc_fragments/redfish.py
+++ b/plugins/doc_fragments/redfish.py
@@ -15,7 +15,7 @@ class ModuleDocFragment(object):
options:
validate_certs:
description:
- - If V(false), TLS/SSL certificates will not be validated.
+ - If V(false), TLS/SSL certificates are not validated.
- Set this to V(true) to enable certificate checking. Should be used together with O(ca_path).
type: bool
default: false
diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py
index 149c018d79..c7bb88b81d 100644
--- a/plugins/doc_fragments/redis.py
+++ b/plugins/doc_fragments/redis.py
@@ -45,7 +45,7 @@ options:
default: true
ca_certs:
description:
- - Path to root certificates file. If not set and O(tls) is set to V(true), certifi ca-certificates will be used.
+ - Path to root certificates file. If not set and O(tls) is set to V(true), certifi's CA certificates are used.
type: str
client_cert_file:
description:
diff --git a/plugins/doc_fragments/utm.py b/plugins/doc_fragments/utm.py
index 3b2118485e..32c18e93b8 100644
--- a/plugins/doc_fragments/utm.py
+++ b/plugins/doc_fragments/utm.py
@@ -49,8 +49,8 @@ options:
state:
description:
- The desired state of the object.
- - V(present) will create or update an object.
- - V(absent) will delete an object if it was present.
+ - V(present) creates or updates an object.
+ - V(absent) deletes an object if present.
type: str
choices: [absent, present]
default: present
diff --git a/plugins/doc_fragments/xenserver.py b/plugins/doc_fragments/xenserver.py
index d1377e8964..f4e0946219 100644
--- a/plugins/doc_fragments/xenserver.py
+++ b/plugins/doc_fragments/xenserver.py
@@ -15,28 +15,27 @@ options:
hostname:
description:
- The hostname or IP address of the XenServer host or XenServer pool master.
- - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) is used instead.
type: str
default: localhost
aliases: [host, pool]
username:
description:
- The username to use for connecting to XenServer.
- - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) is used instead.
type: str
default: root
aliases: [admin, user]
password:
description:
- The password to use for connecting to XenServer.
- - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) will be used instead.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) is used instead.
type: str
aliases: [pass, pwd]
validate_certs:
description:
- Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted.
- - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) will be used
- instead.
+ - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) is used instead.
type: bool
default: true
"""
diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py
index 48d53bcbd3..6a2feb93f0 100644
--- a/plugins/filter/jc.py
+++ b/plugins/filter/jc.py
@@ -143,11 +143,11 @@ def jc_filter(data, parser, quiet=True, raw=False):
# old API (jc v1.17.7 and lower)
else:
- jc_parser = importlib.import_module('jc.parsers.' + parser)
+ jc_parser = importlib.import_module(f'jc.parsers.{parser}')
return jc_parser.parse(data, quiet=quiet, raw=raw)
except Exception as e:
- raise AnsibleFilterError('Error in jc filter plugin: %s' % e)
+ raise AnsibleFilterError(f'Error in jc filter plugin: {e}')
class FilterModule(object):
diff --git a/plugins/filter/json_patch.yml b/plugins/filter/json_patch.yml
index 6fd411d6ff..42a0309202 100644
--- a/plugins/filter/json_patch.yml
+++ b/plugins/filter/json_patch.yml
@@ -64,7 +64,7 @@ EXAMPLES: |
ansible.builtin.debug:
msg: "{{ input | community.general.json_patch('add', '/1', {'baz': 'qux'}) }}"
vars:
- input: ["foo": { "one": 1 }, "bar": { "two": 2 }]
+ input: ["foo": { "one": 1 }, "bar": { "two": 2 }]
# => [{"foo": {"one": 1}}, {"baz": "qux"}, {"bar": {"two": 2}}]
- name: Insert a new key into a dictionary
@@ -94,7 +94,7 @@ EXAMPLES: |
vars:
input: {}
# => {"~/": "qux"}
-
+
- name: Add at the end of the array
ansible.builtin.debug:
msg: "{{ input | community.general.json_patch('add', '/-', 4) }}"
@@ -136,7 +136,7 @@ EXAMPLES: |
vars:
input: { "foo": { "one": 1 }, "bar": { "two": 2 } }
# => OK
-
+
- name: Unuccessful test
ansible.builtin.debug:
msg: "{{ input | community.general.json_patch('test', '/bar/two', 9) | ternary('OK', 'Failed') }}"
diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py
index 8976694a94..9c0a83a481 100644
--- a/plugins/filter/json_query.py
+++ b/plugins/filter/json_query.py
@@ -124,10 +124,17 @@ def json_query(data, expr):
'json_query filter')
# Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence
- # See issue: https://github.com/ansible-collections/community.general/issues/320
- jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
- jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', )
- jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', )
+ # See issues https://github.com/ansible-collections/community.general/issues/320
+ # and https://github.com/ansible/ansible/issues/85600.
+ jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + (
+ 'AnsibleUnicode', 'AnsibleUnsafeText', '_AnsibleTaggedStr',
+ )
+ jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + (
+ 'AnsibleSequence', '_AnsibleLazyTemplateList',
+ )
+ jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + (
+ 'AnsibleMapping', '_AnsibleLazyTemplateDict',
+ )
try:
return jmespath.search(expr, data)
except jmespath.exceptions.JMESPathError as e:
diff --git a/plugins/filter/lists_difference.yml b/plugins/filter/lists_difference.yml
index 9806a9f0bc..630e77cf0a 100644
--- a/plugins/filter/lists_difference.yml
+++ b/plugins/filter/lists_difference.yml
@@ -31,7 +31,7 @@ EXAMPLES: |
list1: [1, 2, 5, 3, 4, 10]
list2: [1, 2, 3, 4, 5, 11, 99]
# => [10]
-
+
- name: Return the difference of list1, list2 and list3.
ansible.builtin.debug:
msg: "{{ [list1, list2, list3] | community.general.lists_difference(flatten=true) }}"
diff --git a/plugins/filter/lists_intersect.yml b/plugins/filter/lists_intersect.yml
index 8253463dee..d2ea9483b1 100644
--- a/plugins/filter/lists_intersect.yml
+++ b/plugins/filter/lists_intersect.yml
@@ -31,7 +31,7 @@ EXAMPLES: |
list1: [1, 2, 5, 3, 4, 10]
list2: [1, 2, 3, 4, 5, 11, 99]
# => [1, 2, 5, 3, 4]
-
+
- name: Return the intersection of list1, list2 and list3.
ansible.builtin.debug:
msg: "{{ [list1, list2, list3] | community.general.lists_intersect(flatten=true) }}"
diff --git a/plugins/filter/lists_symmetric_difference.yml b/plugins/filter/lists_symmetric_difference.yml
index d985704c2c..abd8caab8a 100644
--- a/plugins/filter/lists_symmetric_difference.yml
+++ b/plugins/filter/lists_symmetric_difference.yml
@@ -31,7 +31,7 @@ EXAMPLES: |
list1: [1, 2, 5, 3, 4, 10]
list2: [1, 2, 3, 4, 5, 11, 99]
# => [10, 11, 99]
-
+
- name: Return the symmetric difference of list1, list2 and list3.
ansible.builtin.debug:
msg: "{{ [list1, list2, list3] | community.general.lists_symmetric_difference(flatten=true) }}"
diff --git a/plugins/filter/lists_union.yml b/plugins/filter/lists_union.yml
index ba69090836..8c1ffb4f87 100644
--- a/plugins/filter/lists_union.yml
+++ b/plugins/filter/lists_union.yml
@@ -32,7 +32,7 @@ EXAMPLES: |
list2: [1, 2, 3, 4, 5, 11, 99]
list3: [1, 2, 3, 4, 5, 10, 99, 101]
# => [1, 2, 5, 3, 4, 10, 11, 99, 101]
-
+
- name: Return the union of list1 and list2.
ansible.builtin.debug:
msg: "{{ [list1, list2] | community.general.lists_union(flatten=true) }}"
diff --git a/plugins/filter/replace_keys.py b/plugins/filter/replace_keys.py
index d47468bd3c..69fe02832b 100644
--- a/plugins/filter/replace_keys.py
+++ b/plugins/filter/replace_keys.py
@@ -34,7 +34,7 @@ options:
description:
- A key or key pattern to change.
- The interpretation of O(target[].before) depends on O(matching_parameter).
- - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) will be used.
+ - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) is used.
type: str
after:
description: A matching key change to.
diff --git a/plugins/filter/to_prettytable.py b/plugins/filter/to_prettytable.py
new file mode 100644
index 0000000000..269ac318ff
--- /dev/null
+++ b/plugins/filter/to_prettytable.py
@@ -0,0 +1,411 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Timur Gadiev
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+DOCUMENTATION = r"""
+name: to_prettytable
+short_description: Format a list of dictionaries as an ASCII table
+version_added: "10.7.0"
+author: Timur Gadiev (@tgadiev)
+description:
+ - This filter takes a list of dictionaries and formats it as an ASCII table using the I(prettytable) Python library.
+requirements:
+ - prettytable
+options:
+ _input:
+ description: A list of dictionaries to format.
+ type: list
+ elements: dictionary
+ required: true
+ column_order:
+ description: List of column names to specify the order of columns in the table.
+ type: list
+ elements: string
+ header_names:
+ description: List of custom header names to use instead of dictionary keys.
+ type: list
+ elements: string
+ column_alignments:
+ description:
+ - Dictionary where keys are column names and values are alignment settings. Valid alignment values are C(left), C(center),
+ C(right), C(l), C(c), or C(r).
+ - "For example, V({'name': 'left', 'id': 'right'}) aligns the C(name) column to the left and the C(id) column to the
+ right."
+ type: dictionary
+"""
+
+EXAMPLES = r"""
+- name: Set a list of users
+ ansible.builtin.set_fact:
+ users:
+ - name: Alice
+ age: 25
+ role: admin
+ - name: Bob
+ age: 30
+ role: user
+
+- name: Display a list of users as a table
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable
+ }}
+
+- name: Display a table with custom column ordering
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_order=['role', 'name', 'age']
+ )
+ }}
+
+- name: Display a table with selective column output (only show name and role fields)
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_order=['name', 'role']
+ )
+ }}
+
+- name: Display a table with custom headers
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ header_names=['User Name', 'User Age', 'User Role']
+ )
+ }}
+
+- name: Display a table with custom alignments
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_alignments={'name': 'center', 'age': 'right', 'role': 'left'}
+ )
+ }}
+
+- name: Combine multiple options
+ ansible.builtin.debug:
+ msg: >-
+ {{
+ users | community.general.to_prettytable(
+ column_order=['role', 'name', 'age'],
+ header_names=['Position', 'Full Name', 'Years'],
+ column_alignments={'name': 'center', 'age': 'right', 'role': 'left'}
+ )
+ }}
+"""
+
+RETURN = r"""
+_value:
+ description: The formatted ASCII table.
+ type: string
+"""
+
+try:
+ import prettytable
+ HAS_PRETTYTABLE = True
+except ImportError:
+ HAS_PRETTYTABLE = False
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six import string_types
+
+
+class TypeValidationError(AnsibleFilterError):
+ """Custom exception for type validation errors.
+
+ Args:
+ obj: The object with incorrect type
+ expected: Description of expected type
+ """
+ def __init__(self, obj, expected):
+ type_name = "string" if isinstance(obj, string_types) else type(obj).__name__
+ super().__init__(f"Expected {expected}, got a {type_name}")
+
+
+def _validate_list_param(param, param_name, ensure_strings=True):
+ """Validate a parameter is a list and optionally ensure all elements are strings.
+
+ Args:
+ param: The parameter to validate
+ param_name: The name of the parameter for error messages
+ ensure_strings: Whether to check that all elements are strings
+
+ Raises:
+ AnsibleFilterError: If validation fails
+ """
+ # Map parameter names to their original error message format
+ error_messages = {
+ "column_order": "a list of column names",
+ "header_names": "a list of header names"
+ }
+
+ # Use the specific error message if available, otherwise use a generic one
+ error_msg = error_messages.get(param_name, f"a list for {param_name}")
+
+ if not isinstance(param, list):
+ raise TypeValidationError(param, error_msg)
+
+ if ensure_strings:
+ for item in param:
+ if not isinstance(item, string_types):
+ # Maintain original error message format
+ if param_name == "column_order":
+ error_msg = "a string for column name"
+ elif param_name == "header_names":
+ error_msg = "a string for header name"
+ else:
+ error_msg = f"a string for {param_name} element"
+ raise TypeValidationError(item, error_msg)
+
+
+def _match_key(item_dict, lookup_key):
+ """Find a matching key in a dictionary, handling type conversion.
+
+ Args:
+ item_dict: Dictionary to search in
+ lookup_key: Key to look for, possibly needing type conversion
+
+ Returns:
+ The matching key or None if no match found
+ """
+ # Direct key match
+ if lookup_key in item_dict:
+ return lookup_key
+
+ # Try boolean conversion for 'true'/'false' strings
+ if isinstance(lookup_key, string_types):
+ if lookup_key.lower() == 'true' and True in item_dict:
+ return True
+ if lookup_key.lower() == 'false' and False in item_dict:
+ return False
+
+ # Try numeric conversion for string numbers
+ if lookup_key.isdigit() and int(lookup_key) in item_dict:
+ return int(lookup_key)
+
+ # No match found
+ return None
+
+
+def _build_key_maps(data):
+ """Build mappings between string keys and original keys.
+
+ Args:
+ data: List of dictionaries with keys to map
+
+ Returns:
+ Tuple of (key_map, reverse_key_map)
+ """
+ key_map = {}
+ reverse_key_map = {}
+
+ # Check if the data list is not empty
+ if not data:
+ return key_map, reverse_key_map
+
+ first_dict = data[0]
+ for orig_key in first_dict.keys():
+ # Store string version of the key
+ str_key = to_text(orig_key)
+ key_map[str_key] = orig_key
+ # Also store lowercase version for case-insensitive lookups
+ reverse_key_map[str_key.lower()] = orig_key
+
+ return key_map, reverse_key_map
+
+
+def _configure_alignments(table, field_names, column_alignments):
+ """Configure column alignments for the table.
+
+ Args:
+ table: The PrettyTable instance to configure
+ field_names: List of field names to align
+ column_alignments: Dict of column alignments
+ """
+ valid_alignments = {"left", "center", "right", "l", "c", "r"}
+
+ if not isinstance(column_alignments, dict):
+ return
+
+ for col_name, alignment in column_alignments.items():
+ if col_name in field_names:
+ # We already validated alignment is a string and a valid value in the main function
+ # Just apply it here
+ alignment = alignment.lower()
+ table.align[col_name] = alignment[0]
+
+
+def to_prettytable(data, *args, **kwargs):
+ """Convert a list of dictionaries to an ASCII table.
+
+ Args:
+ data: List of dictionaries to format
+ *args: Optional list of column names to specify column order
+ **kwargs: Optional keyword arguments:
+ - column_order: List of column names to specify the order
+ - header_names: List of custom header names
+ - column_alignments: Dict of column alignments (left, center, right)
+
+ Returns:
+ String containing the ASCII table
+ """
+ if not HAS_PRETTYTABLE:
+ raise AnsibleFilterError(
+ 'You need to install "prettytable" Python module to use this filter'
+ )
+
+ # === Input validation ===
+ # Validate list type
+ if not isinstance(data, list):
+ raise TypeValidationError(data, "a list of dictionaries")
+
+ # Validate dictionary items if list is not empty
+ if data and not all(isinstance(item, dict) for item in data):
+ invalid_item = next((item for item in data if not isinstance(item, dict)), None)
+ raise TypeValidationError(invalid_item, "all items in the list to be dictionaries")
+
+ # Get sample dictionary to determine fields - empty if no data
+ sample_dict = data[0] if data else {}
+ max_fields = len(sample_dict)
+
+ # === Process column order ===
+ # Handle both positional and keyword column_order
+ column_order = kwargs.pop('column_order', None)
+
+ # Check for conflict between args and column_order
+ if args and column_order is not None:
+ raise AnsibleFilterError("Cannot use both positional arguments and the 'column_order' keyword argument")
+
+ # Use positional args if provided
+ if args:
+ column_order = list(args)
+
+ # Validate column_order
+ if column_order is not None:
+ _validate_list_param(column_order, "column_order")
+
+ # Validate column_order doesn't exceed the number of fields (skip if data is empty)
+ if data and len(column_order) > max_fields:
+ raise AnsibleFilterError(
+ f"'column_order' has more elements ({len(column_order)}) than available fields in data ({max_fields})")
+
+ # === Process headers ===
+ # Determine field names and ensure they are strings
+ if column_order:
+ field_names = column_order
+ else:
+ # Use field names from first dictionary, ensuring all are strings
+ field_names = [to_text(k) for k in sample_dict]
+
+ # Process custom headers
+ header_names = kwargs.pop('header_names', None)
+ if header_names is not None:
+ _validate_list_param(header_names, "header_names")
+
+ # Validate header_names doesn't exceed the number of fields (skip if data is empty)
+ if data and len(header_names) > max_fields:
+ raise AnsibleFilterError(
+ f"'header_names' has more elements ({len(header_names)}) than available fields in data ({max_fields})")
+
+ # Validate that column_order and header_names have the same size if both provided
+ if column_order is not None and len(column_order) != len(header_names):
+ raise AnsibleFilterError(
+ f"'column_order' and 'header_names' must have the same number of elements. "
+ f"Got {len(column_order)} columns and {len(header_names)} headers.")
+
+ # === Process alignments ===
+ # Get column alignments and validate
+ column_alignments = kwargs.pop('column_alignments', {})
+ valid_alignments = {"left", "center", "right", "l", "c", "r"}
+
+ # Validate column_alignments is a dictionary
+ if not isinstance(column_alignments, dict):
+ raise TypeValidationError(column_alignments, "a dictionary for column_alignments")
+
+ # Validate column_alignments keys and values
+ for key, value in column_alignments.items():
+ # Check that keys are strings
+ if not isinstance(key, string_types):
+ raise TypeValidationError(key, "a string for column_alignments key")
+
+ # Check that values are strings
+ if not isinstance(value, string_types):
+ raise TypeValidationError(value, "a string for column_alignments value")
+
+ # Check that values are valid alignments
+ if value.lower() not in valid_alignments:
+ raise AnsibleFilterError(
+ f"Invalid alignment '{value}' in 'column_alignments'. "
+ f"Valid alignments are: {', '.join(sorted(valid_alignments))}")
+
+ # Validate column_alignments doesn't have more keys than fields (skip if data is empty)
+ if data and len(column_alignments) > max_fields:
+ raise AnsibleFilterError(
+ f"'column_alignments' has more elements ({len(column_alignments)}) than available fields in data ({max_fields})")
+
+ # Check for unknown parameters
+ if kwargs:
+ raise AnsibleFilterError(f"Unknown parameter(s) for to_prettytable filter: {', '.join(sorted(kwargs))}")
+
+ # === Build the table ===
+ table = prettytable.PrettyTable()
+
+ # Set the field names for display
+ display_names = header_names if header_names is not None else field_names
+ table.field_names = [to_text(name) for name in display_names]
+
+ # Configure alignments after setting field_names
+ _configure_alignments(table, display_names, column_alignments)
+
+ # Build key maps only if not using explicit column_order and we have data
+ key_map = {}
+ reverse_key_map = {}
+ if not column_order and data: # Only needed when using original dictionary keys and we have data
+ key_map, reverse_key_map = _build_key_maps(data)
+
+ # If we have an empty list with no custom parameters, return a simple empty table
+ if not data and not column_order and not header_names and not column_alignments:
+ return "++\n++"
+
+ # Process each row if we have data
+ for item in data:
+ row = []
+ for col in field_names:
+ # Try direct mapping first
+ if col in key_map:
+ row.append(item.get(key_map[col], ""))
+ else:
+ # Try to find a matching key in the item
+ matched_key = _match_key(item, col)
+ if matched_key is not None:
+ row.append(item.get(matched_key, ""))
+ else:
+ # Try case-insensitive lookup as last resort
+ lower_col = col.lower() if isinstance(col, string_types) else str(col).lower()
+ if lower_col in reverse_key_map:
+ row.append(item.get(reverse_key_map[lower_col], ""))
+ else:
+ # No match found
+ row.append("")
+ table.add_row(row)
+
+ return to_text(table)
+
+
+class FilterModule(object):
+ """Ansible core jinja2 filters."""
+
+ def filters(self):
+ return {
+ 'to_prettytable': to_prettytable
+ }
diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py
index 4546bf8d6c..677e1a3ad5 100644
--- a/plugins/inventory/cobbler.py
+++ b/plugins/inventory/cobbler.py
@@ -5,114 +5,131 @@
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
-DOCUMENTATION = '''
- author: Orion Poplawski (@opoplawski)
- name: cobbler
- short_description: Cobbler inventory source
- version_added: 1.0.0
+DOCUMENTATION = r"""
+author: Orion Poplawski (@opoplawski)
+name: cobbler
+short_description: Cobbler inventory source
+version_added: 1.0.0
+description:
+ - Get inventory hosts from the cobbler service.
+ - 'Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin:
+ cobbler) entry.'
+ - Adds the primary IP addresses to C(cobbler_ipv4_address) and C(cobbler_ipv6_address) host variables if defined in Cobbler.
+ The primary IP address is defined as the management interface if defined, or the interface whose DNS name matches the
+ hostname of the system, or else the first interface found.
+extends_documentation_fragment:
+ - inventory_cache
+options:
+ plugin:
+ description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize
+ it as its own.
+ type: string
+ required: true
+ choices: ['cobbler', 'community.general.cobbler']
+ url:
+ description: URL to cobbler.
+ type: string
+ default: 'http://cobbler/cobbler_api'
+ env:
+ - name: COBBLER_SERVER
+ user:
+ description: Cobbler authentication user.
+ type: string
+ required: false
+ env:
+ - name: COBBLER_USER
+ password:
+ description: Cobbler authentication password.
+ type: string
+ required: false
+ env:
+ - name: COBBLER_PASSWORD
+ cache_fallback:
+ description: Fallback to cached results if connection to cobbler fails.
+ type: boolean
+ default: false
+ connection_timeout:
+ description: Timeout to connect to cobbler server.
+ type: int
+ required: false
+ version_added: 10.7.0
+ exclude_mgmt_classes:
+ description: Management classes to exclude from inventory.
+ type: list
+ default: []
+ elements: str
+ version_added: 7.4.0
+ exclude_profiles:
description:
- - Get inventory hosts from the cobbler service.
- - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin: cobbler) entry."
- - Adds the primary IP addresses to C(cobbler_ipv4_address) and C(cobbler_ipv6_address) host variables if defined in Cobbler. The primary IP address is
- defined as the management interface if defined, or the interface who's DNS name matches the hostname of the system, or else the first interface found.
- extends_documentation_fragment:
- - inventory_cache
- options:
- plugin:
- description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize it as its own.
- type: string
- required: true
- choices: [ 'cobbler', 'community.general.cobbler' ]
- url:
- description: URL to cobbler.
- type: string
- default: 'http://cobbler/cobbler_api'
- env:
- - name: COBBLER_SERVER
- user:
- description: Cobbler authentication user.
- type: string
- required: false
- env:
- - name: COBBLER_USER
- password:
- description: Cobbler authentication password.
- type: string
- required: false
- env:
- - name: COBBLER_PASSWORD
- cache_fallback:
- description: Fallback to cached results if connection to cobbler fails.
- type: boolean
- default: false
- exclude_mgmt_classes:
- description: Management classes to exclude from inventory.
- type: list
- default: []
- elements: str
- version_added: 7.4.0
- exclude_profiles:
- description:
- - Profiles to exclude from inventory.
- - Ignored if O(include_profiles) is specified.
- type: list
- default: []
- elements: str
- include_mgmt_classes:
- description: Management classes to include from inventory.
- type: list
- default: []
- elements: str
- version_added: 7.4.0
- include_profiles:
- description:
- - Profiles to include from inventory.
- - If specified, all other profiles will be excluded.
- - O(exclude_profiles) is ignored if O(include_profiles) is specified.
- type: list
- default: []
- elements: str
- version_added: 4.4.0
- inventory_hostname:
- description:
- - What to use for the ansible inventory hostname.
- - By default the networking hostname is used if defined, otherwise the DNS name of the management or first non-static interface.
- - If set to V(system), the cobbler system name is used.
- type: str
- choices: [ 'hostname', 'system' ]
- default: hostname
- version_added: 7.1.0
- group_by:
- description: Keys to group hosts by.
- type: list
- elements: string
- default: [ 'mgmt_classes', 'owners', 'status' ]
- group:
- description: Group to place all hosts into.
- default: cobbler
- group_prefix:
- description: Prefix to apply to cobbler groups.
- default: cobbler_
- want_facts:
- description: Toggle, if V(true) the plugin will retrieve host facts from the server.
- type: boolean
- default: true
- want_ip_addresses:
- description:
- - Toggle, if V(true) the plugin will add a C(cobbler_ipv4_addresses) and C(cobbleer_ipv6_addresses) dictionary to the defined O(group) mapping
- interface DNS names to IP addresses.
- type: boolean
- default: true
- version_added: 7.1.0
-'''
+ - Profiles to exclude from inventory.
+ - Ignored if O(include_profiles) is specified.
+ type: list
+ default: []
+ elements: str
+ include_mgmt_classes:
+ description: Management classes to include from inventory.
+ type: list
+ default: []
+ elements: str
+ version_added: 7.4.0
+ include_profiles:
+ description:
+ - Profiles to include from inventory.
+ - If specified, all other profiles are excluded.
+ - O(exclude_profiles) is ignored if O(include_profiles) is specified.
+ type: list
+ default: []
+ elements: str
+ version_added: 4.4.0
+ inventory_hostname:
+ description:
+ - What to use for the ansible inventory hostname.
+ - By default the networking hostname is used if defined, otherwise the DNS name of the management or first non-static
+ interface.
+ - If set to V(system), the cobbler system name is used.
+ type: str
+ choices: ['hostname', 'system']
+ default: hostname
+ version_added: 7.1.0
+ group_by:
+ description: Keys to group hosts by.
+ type: list
+ elements: string
+ default: ['mgmt_classes', 'owners', 'status']
+ group:
+ description: Group to place all hosts into.
+ default: cobbler
+ group_prefix:
+ description: Prefix to apply to cobbler groups.
+ default: cobbler_
+ want_facts:
+ description: Toggle, if V(true) the plugin retrieves all host facts from the server.
+ type: boolean
+ default: true
+ want_ip_addresses:
+ description:
+ - Toggle, if V(true) the plugin adds a C(cobbler_ipv4_addresses) and C(cobbler_ipv6_addresses) dictionary to the
+ defined O(group) mapping interface DNS names to IP addresses.
+ type: boolean
+ default: true
+ version_added: 7.1.0
+ facts_level:
+ description:
+ - Set to V(normal) to gather only system-level variables.
+ - Set to V(as_rendered) to gather all variables as rolled up by Cobbler.
+ type: string
+ choices: ['normal', 'as_rendered']
+ default: normal
+ version_added: 10.7.0
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
# my.cobbler.yml
plugin: community.general.cobbler
url: http://cobbler/cobbler_api
user: ansible-tester
password: secure
-'''
+"""
import socket
@@ -134,6 +151,18 @@ except ImportError:
HAS_XMLRPC_CLIENT = False
+class TimeoutTransport (xmlrpc_client.SafeTransport):
+ def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
+ super(TimeoutTransport, self).__init__()
+ self._timeout = timeout
+ self.context = None
+
+ def make_connection(self, host):
+ conn = xmlrpc_client.SafeTransport.make_connection(self, host)
+ conn.timeout = self._timeout
+ return conn
+
+
class InventoryModule(BaseInventoryPlugin, Cacheable):
''' Host inventory parser for ansible using cobbler as source. '''
@@ -142,7 +171,9 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
def __init__(self):
super(InventoryModule, self).__init__()
self.cache_key = None
- self.connection = None
+
+ if not HAS_XMLRPC_CLIENT:
+ raise AnsibleError('Could not import xmlrpc client library')
def verify_file(self, path):
valid = False
@@ -153,18 +184,6 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" nor "cobbler.yml"')
return valid
- def _get_connection(self):
- if not HAS_XMLRPC_CLIENT:
- raise AnsibleError('Could not import xmlrpc client library')
-
- if self.connection is None:
- self.display.vvvv(f'Connecting to {self.cobbler_url}\n')
- self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
- self.token = None
- if self.get_option('user') is not None:
- self.token = self.connection.login(text_type(self.get_option('user')), text_type(self.get_option('password')))
- return self.connection
-
def _init_cache(self):
if self.cache_key not in self._cache:
self._cache[self.cache_key] = {}
@@ -178,12 +197,11 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
def _get_profiles(self):
if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}):
- c = self._get_connection()
try:
if self.token is not None:
- data = c.get_profiles(self.token)
+ data = self.cobbler.get_profiles(self.token)
else:
- data = c.get_profiles()
+ data = self.cobbler.get_profiles()
except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
self._reload_cache()
else:
@@ -194,12 +212,20 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
def _get_systems(self):
if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}):
- c = self._get_connection()
try:
if self.token is not None:
- data = c.get_systems(self.token)
+ data = self.cobbler.get_systems(self.token)
else:
- data = c.get_systems()
+ data = self.cobbler.get_systems()
+
+ # If more facts are requested, gather them all from Cobbler
+ if self.facts_level == "as_rendered":
+ for i, host in enumerate(data):
+ self.display.vvvv(f"Gathering all facts for {host['name']}\n")
+ if self.token is not None:
+ data[i] = self.cobbler.get_system_as_rendered(host['name'], self.token)
+ else:
+ data[i] = self.cobbler.get_system_as_rendered(host['name'])
except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
self._reload_cache()
else:
@@ -229,6 +255,17 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
# get connection host
self.cobbler_url = self.get_option('url')
+ self.display.vvvv(f'Connecting to {self.cobbler_url}\n')
+
+ if 'connection_timeout' in self._options:
+ self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True,
+ transport=TimeoutTransport(timeout=self.get_option('connection_timeout')))
+ else:
+ self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
+ self.token = None
+ if self.get_option('user') is not None:
+ self.token = self.cobbler.login(text_type(self.get_option('user')), text_type(self.get_option('password')))
+
self.cache_key = self.get_cache_key(path)
self.use_cache = cache and self.get_option('cache')
@@ -238,6 +275,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
self.include_profiles = self.get_option('include_profiles')
self.group_by = self.get_option('group_by')
self.inventory_hostname = self.get_option('inventory_hostname')
+ self.facts_level = self.get_option('facts_level')
for profile in self._get_profiles():
if profile['parent']:
@@ -319,7 +357,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable):
# Add host to groups specified by group_by fields
for group_by in self.group_by:
- if host[group_by] == '<>':
+ if host[group_by] == '<>' or host[group_by] == '':
groups = []
else:
groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by]
diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py
index 961f20444b..7a10b553a2 100644
--- a/plugins/inventory/gitlab_runners.py
+++ b/plugins/inventory/gitlab_runners.py
@@ -7,56 +7,56 @@
from __future__ import annotations
-DOCUMENTATION = '''
- name: gitlab_runners
- author:
- - Stefan Heitmüller (@morph027)
- short_description: Ansible dynamic inventory plugin for GitLab runners.
- requirements:
- - python-gitlab > 1.8.0
- extends_documentation_fragment:
- - constructed
- description:
- - Reads inventories from the GitLab API.
- - Uses a YAML configuration file gitlab_runners.[yml|yaml].
- options:
- plugin:
- description: The name of this plugin, it should always be set to 'gitlab_runners' for this plugin to recognize it as its own.
- type: str
- required: true
- choices:
- - gitlab_runners
- - community.general.gitlab_runners
- server_url:
- description: The URL of the GitLab server, with protocol (i.e. http or https).
- env:
- - name: GITLAB_SERVER_URL
- version_added: 1.0.0
- type: str
- required: true
- api_token:
- description: GitLab token for logging in.
- env:
- - name: GITLAB_API_TOKEN
- version_added: 1.0.0
- type: str
- aliases:
- - private_token
- - access_token
- filter:
- description: filter runners from GitLab API
- env:
- - name: GITLAB_FILTER
- version_added: 1.0.0
- type: str
- choices: ['active', 'paused', 'online', 'specific', 'shared']
- verbose_output:
- description: Toggle to (not) include all available nodes metadata
- type: bool
- default: true
-'''
+DOCUMENTATION = r"""
+name: gitlab_runners
+author:
+ - Stefan Heitmüller (@morph027)
+short_description: Ansible dynamic inventory plugin for GitLab runners
+requirements:
+ - python-gitlab > 1.8.0
+extends_documentation_fragment:
+ - constructed
+description:
+ - Reads inventories from the GitLab API.
+ - Uses a YAML configuration file gitlab_runners.[yml|yaml].
+options:
+ plugin:
+ description: The name of this plugin, it should always be set to V(gitlab_runners) for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices:
+ - gitlab_runners
+ - community.general.gitlab_runners
+ server_url:
+ description: The URL of the GitLab server, with protocol (i.e. http or https).
+ env:
+ - name: GITLAB_SERVER_URL
+ version_added: 1.0.0
+ type: str
+ required: true
+ api_token:
+ description: GitLab token for logging in.
+ env:
+ - name: GITLAB_API_TOKEN
+ version_added: 1.0.0
+ type: str
+ aliases:
+ - private_token
+ - access_token
+ filter:
+ description: Filter runners from GitLab API.
+ env:
+ - name: GITLAB_FILTER
+ version_added: 1.0.0
+ type: str
+ choices: ['active', 'paused', 'online', 'specific', 'shared']
+ verbose_output:
+ description: Toggle to (not) include all available nodes metadata.
+ type: bool
+ default: true
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
---
# gitlab_runners.yml
plugin: community.general.gitlab_runners
@@ -79,7 +79,7 @@ keyed_groups:
# hint: labels containing special characters will be converted to safe names
- key: 'tag_list'
prefix: tag
-'''
+"""
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py
index 7ee87df065..64d77b437d 100644
--- a/plugins/inventory/icinga2.py
+++ b/plugins/inventory/icinga2.py
@@ -6,71 +6,70 @@
from __future__ import annotations
-DOCUMENTATION = '''
- name: icinga2
- short_description: Icinga2 inventory source
- version_added: 3.7.0
- author:
- - Cliff Hults (@BongoEADGC6)
+DOCUMENTATION = r"""
+name: icinga2
+short_description: Icinga2 inventory source
+version_added: 3.7.0
+author:
+ - Cliff Hults (@BongoEADGC6)
+description:
+ - Get inventory hosts from the Icinga2 API.
+ - Uses a configuration file as an inventory source, it must end in C(.icinga2.yml) or C(.icinga2.yaml).
+extends_documentation_fragment:
+ - constructed
+options:
+ strict:
+ version_added: 4.4.0
+ compose:
+ version_added: 4.4.0
+ groups:
+ version_added: 4.4.0
+ keyed_groups:
+ version_added: 4.4.0
+ plugin:
+ description: Name of the plugin.
+ required: true
+ type: string
+ choices: ['community.general.icinga2']
+ url:
+ description: Root URL of Icinga2 API.
+ type: string
+ required: true
+ user:
+ description: Username to query the API.
+ type: string
+ required: true
+ password:
+ description: Password to query the API.
+ type: string
+ required: true
+ host_filter:
description:
- - Get inventory hosts from the Icinga2 API.
- - "Uses a configuration file as an inventory source, it must end in
- C(.icinga2.yml) or C(.icinga2.yaml)."
- extends_documentation_fragment:
- - constructed
- options:
- strict:
- version_added: 4.4.0
- compose:
- version_added: 4.4.0
- groups:
- version_added: 4.4.0
- keyed_groups:
- version_added: 4.4.0
- plugin:
- description: Name of the plugin.
- required: true
- type: string
- choices: ['community.general.icinga2']
- url:
- description: Root URL of Icinga2 API.
- type: string
- required: true
- user:
- description: Username to query the API.
- type: string
- required: true
- password:
- description: Password to query the API.
- type: string
- required: true
- host_filter:
- description:
- - An Icinga2 API valid host filter. Leave blank for no filtering
- type: string
- required: false
- validate_certs:
- description: Enables or disables SSL certificate verification.
- type: boolean
- default: true
- inventory_attr:
- description:
- - Allows the override of the inventory name based on different attributes.
- - This allows for changing the way limits are used.
- - The current default, V(address), is sometimes not unique or present. We recommend to use V(name) instead.
- type: string
- default: address
- choices: ['name', 'display_name', 'address']
- version_added: 4.2.0
- group_by_hostgroups:
- description:
- - Uses Icinga2 hostgroups as groups.
- type: boolean
- default: true
- version_added: 8.4.0
-'''
+ - An Icinga2 API valid host filter. Leave blank for no filtering.
+ type: string
+ required: false
+ validate_certs:
+ description: Enables or disables SSL certificate verification.
+ type: boolean
+ default: true
+ inventory_attr:
+ description:
+ - Allows the override of the inventory name based on different attributes.
+ - This allows for changing the way limits are used.
+ - The current default, V(address), is sometimes not unique or present. We recommend to use V(name) instead.
+ type: string
+ default: address
+ choices: ['name', 'display_name', 'address']
+ version_added: 4.2.0
+ group_by_hostgroups:
+ description:
+ - Uses Icinga2 hostgroups as groups.
+ type: boolean
+ default: true
+ version_added: 8.4.0
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# my.icinga2.yml
plugin: community.general.icinga2
url: http://localhost:5665
@@ -93,7 +92,7 @@ compose:
# set 'ansible_user' and 'ansible_port' from icinga2 host vars
ansible_user: icinga2_attributes.vars.ansible_user
ansible_port: icinga2_attributes.vars.ansible_port | default(22)
-'''
+"""
import json
@@ -291,11 +290,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable):
self.group_by_hostgroups = self.get_option('group_by_hostgroups')
if self.templar.is_template(self.icinga2_url):
- self.icinga2_url = self.templar.template(variable=self.icinga2_url, disable_lookups=False)
+ self.icinga2_url = self.templar.template(variable=self.icinga2_url)
if self.templar.is_template(self.icinga2_user):
- self.icinga2_user = self.templar.template(variable=self.icinga2_user, disable_lookups=False)
+ self.icinga2_user = self.templar.template(variable=self.icinga2_user)
if self.templar.is_template(self.icinga2_password):
- self.icinga2_password = self.templar.template(variable=self.icinga2_password, disable_lookups=False)
+ self.icinga2_password = self.templar.template(variable=self.icinga2_password)
self.icinga2_url = f"{self.icinga2_url.rstrip('/')}/v1"
diff --git a/plugins/inventory/iocage.py b/plugins/inventory/iocage.py
index 6edac6d005..603003d617 100644
--- a/plugins/inventory/iocage.py
+++ b/plugins/inventory/iocage.py
@@ -6,31 +6,27 @@
from __future__ import annotations
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
name: iocage
-short_description: iocage inventory source
+short_description: C(iocage) inventory source
version_added: 10.2.0
author:
- Vladimir Botka (@vbotka)
requirements:
- iocage >= 1.8
description:
- - Get inventory hosts from the iocage jail manager running on O(host).
- - By default, O(host) is V(localhost). If O(host) is not V(localhost) it
- is expected that the user running Ansible on the controller can
- connect to the O(host) account O(user) with SSH non-interactively and
- execute the command C(iocage list).
- - Uses a configuration file as an inventory source, it must end
- in C(.iocage.yml) or C(.iocage.yaml).
+ - Get inventory hosts from the C(iocage) jail manager running on O(host).
+ - By default, O(host) is V(localhost). If O(host) is not V(localhost) it is expected that the user running Ansible on the
+ controller can connect to the O(host) account O(user) with SSH non-interactively and execute the command C(iocage list).
+ - Uses a configuration file as an inventory source, it must end in C(.iocage.yml) or C(.iocage.yaml).
extends_documentation_fragment:
- ansible.builtin.constructed
- ansible.builtin.inventory_cache
options:
plugin:
description:
- - The name of this plugin, it should always be set to
- V(community.general.iocage) for this plugin to recognize
- it as its own.
+ - The name of this plugin, it should always be set to V(community.general.iocage) for this plugin to recognize it as
+ its own.
required: true
choices: ['community.general.iocage']
type: str
@@ -40,10 +36,8 @@ options:
default: localhost
user:
description:
- - C(iocage) user.
- It is expected that the O(user) is able to connect to the
- O(host) with SSH and execute the command C(iocage list).
- This option is not required if O(host) is V(localhost).
+ - C(iocage) user. It is expected that the O(user) is able to connect to the O(host) with SSH and execute the command
+ C(iocage list). This option is not required if O(host=localhost).
type: str
sudo:
description:
@@ -61,8 +55,7 @@ options:
version_added: 10.3.0
get_properties:
description:
- - Get jails' properties.
- Creates dictionary C(iocage_properties) for each added host.
+ - Get jails' properties. Creates dictionary C(iocage_properties) for each added host.
type: bool
default: false
env:
@@ -80,25 +73,34 @@ options:
type: list
elements: path
version_added: 10.4.0
+ inventory_hostname_tag:
+ description:
+ - The name of the tag in the C(iocage properties notes) that contains the jails alias.
+ - By default, the C(iocage list -l) column C(NAME) is used to name the jail.
+ - This option requires the notes format C("t1=v1 t2=v2 ...").
+ - The option O(get_properties) must be enabled.
+ type: str
+ version_added: 11.0.0
+ inventory_hostname_required:
+ description:
+ - If enabled, the tag declared in O(inventory_hostname_tag) is required.
+ type: bool
+ default: false
+ version_added: 11.0.0
notes:
- - You might want to test the command C(ssh user@host iocage list -l) on
- the controller before using this inventory plugin with O(user) specified
- and with O(host) other than V(localhost).
- - If you run this inventory plugin on V(localhost) C(ssh) is not used.
- In this case, test the command C(iocage list -l).
+ - You might want to test the command C(ssh user@host iocage list -l) on the controller before using this inventory plugin
+ with O(user) specified and with O(host) other than V(localhost).
+ - If you run this inventory plugin on V(localhost) C(ssh) is not used. In this case, test the command C(iocage list -l).
- This inventory plugin creates variables C(iocage_*) for each added host.
- - The values of these variables are collected from the output of the
- command C(iocage list -l).
+ - The values of these variables are collected from the output of the command C(iocage list -l).
- The names of these variables correspond to the output columns.
- The column C(NAME) is used to name the added host.
- - The option O(hooks_results) expects the C(poolname) of a jail is mounted to
- C(/poolname). For example, if you activate the pool C(iocage) this plugin
- expects to find the O(hooks_results) items in the path
- C(/iocage/iocage/jails//root). If you mount the C(poolname) to a
- different path the easiest remedy is to create a symlink.
-'''
+ - The option O(hooks_results) expects the C(poolname) of a jail is mounted to C(/poolname). For example, if you activate
+    the pool C(iocage) this plugin expects to find the O(hooks_results) items in the path C(/iocage/iocage/jails/<name>/root).
+ If you mount the C(poolname) to a different path the easiest remedy is to create a symlink.
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
---
# file name must end with iocage.yaml or iocage.yml
plugin: community.general.iocage
@@ -168,7 +170,7 @@ compose:
ansible_host: iocage_hooks.0
groups:
test: inventory_hostname.startswith('test')
-'''
+"""
import re
import os
@@ -253,6 +255,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
env = self.get_option('env')
get_properties = self.get_option('get_properties')
hooks_results = self.get_option('hooks_results')
+ inventory_hostname_tag = self.get_option('inventory_hostname_tag')
+ inventory_hostname_required = self.get_option('inventory_hostname_required')
cmd = []
my_env = os.environ.copy()
@@ -334,7 +338,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
for hostname, host_vars in results['_meta']['hostvars'].items():
iocage_hooks = []
for hook in hooks_results:
- path = "/" + iocage_pool + "/iocage/jails/" + hostname + "/root" + hook
+ path = f"/{iocage_pool}/iocage/jails/{hostname}/root{hook}"
cmd_cat_hook = cmd.copy()
cmd_cat_hook.append('cat')
cmd_cat_hook.append(path)
@@ -357,6 +361,21 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
results['_meta']['hostvars'][hostname]['iocage_hooks'] = iocage_hooks
+ # Optionally, get the jails names from the properties notes.
+ # Requires the notes format "t1=v1 t2=v2 ..."
+ if inventory_hostname_tag:
+ if not get_properties:
+ raise AnsibleError('Jail properties are needed to use inventory_hostname_tag. Enable get_properties')
+ update = {}
+ for hostname, host_vars in results['_meta']['hostvars'].items():
+ tags = dict(tag.split('=', 1) for tag in host_vars['iocage_properties']['notes'].split() if '=' in tag)
+ if inventory_hostname_tag in tags:
+ update[hostname] = tags[inventory_hostname_tag]
+ elif inventory_hostname_required:
+ raise AnsibleError(f'Mandatory tag {inventory_hostname_tag!r} is missing in the properties notes.')
+ for hostname, alias in update.items():
+ results['_meta']['hostvars'][alias] = results['_meta']['hostvars'].pop(hostname)
+
return results
def get_jails(self, t_stdout, results):
diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py
index 594cf30eba..bf6faba07a 100644
--- a/plugins/inventory/linode.py
+++ b/plugins/inventory/linode.py
@@ -5,79 +5,78 @@
from __future__ import annotations
-DOCUMENTATION = r'''
- name: linode
- author:
- - Luke Murphy (@decentral1se)
- short_description: Ansible dynamic inventory plugin for Linode.
- requirements:
- - linode_api4 >= 2.0.0
- description:
- - Reads inventories from the Linode API v4.
- - Uses a YAML configuration file that ends with linode.(yml|yaml).
- - Linode labels are used by default as the hostnames.
- - The default inventory groups are built from groups (deprecated by
- Linode) and not tags.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- cache:
- version_added: 4.5.0
- cache_plugin:
- version_added: 4.5.0
- cache_timeout:
- version_added: 4.5.0
- cache_connection:
- version_added: 4.5.0
- cache_prefix:
- version_added: 4.5.0
- plugin:
- description: Marks this as an instance of the 'linode' plugin.
- type: string
- required: true
- choices: ['linode', 'community.general.linode']
- ip_style:
- description: Populate hostvars with all information available from the Linode APIv4.
- type: string
- default: plain
- choices:
- - plain
- - api
- version_added: 3.6.0
- access_token:
- description: The Linode account personal access token.
- type: string
- required: true
- env:
- - name: LINODE_ACCESS_TOKEN
- regions:
- description: Populate inventory with instances in this region.
- default: []
- type: list
- elements: string
- tags:
- description: Populate inventory only with instances which have at least one of the tags listed here.
- default: []
- type: list
- elements: string
- version_added: 2.0.0
- types:
- description: Populate inventory with instances with this type.
- default: []
- type: list
- elements: string
- strict:
- version_added: 2.0.0
- compose:
- version_added: 2.0.0
- groups:
- version_added: 2.0.0
- keyed_groups:
- version_added: 2.0.0
-'''
+DOCUMENTATION = r"""
+name: linode
+author:
+ - Luke Murphy (@decentral1se)
+short_description: Ansible dynamic inventory plugin for Linode
+requirements:
+ - linode_api4 >= 2.0.0
+description:
+ - Reads inventories from the Linode API v4.
+ - Uses a YAML configuration file that ends with linode.(yml|yaml).
+ - Linode labels are used by default as the hostnames.
+ - The default inventory groups are built from groups (deprecated by Linode) and not tags.
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+options:
+ cache:
+ version_added: 4.5.0
+ cache_plugin:
+ version_added: 4.5.0
+ cache_timeout:
+ version_added: 4.5.0
+ cache_connection:
+ version_added: 4.5.0
+ cache_prefix:
+ version_added: 4.5.0
+ plugin:
+ description: Marks this as an instance of the 'linode' plugin.
+ type: string
+ required: true
+ choices: ['linode', 'community.general.linode']
+ ip_style:
+ description: Populate hostvars with all information available from the Linode APIv4.
+ type: string
+ default: plain
+ choices:
+ - plain
+ - api
+ version_added: 3.6.0
+ access_token:
+ description: The Linode account personal access token.
+ type: string
+ required: true
+ env:
+ - name: LINODE_ACCESS_TOKEN
+ regions:
+ description: Populate inventory with instances in this region.
+ default: []
+ type: list
+ elements: string
+ tags:
+ description: Populate inventory only with instances which have at least one of the tags listed here.
+ default: []
+ type: list
+ elements: string
+ version_added: 2.0.0
+ types:
+ description: Populate inventory with instances with this type.
+ default: []
+ type: list
+ elements: string
+ strict:
+ version_added: 2.0.0
+ compose:
+ version_added: 2.0.0
+ groups:
+ version_added: 2.0.0
+ keyed_groups:
+ version_added: 2.0.0
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
---
# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
plugin: community.general.linode
@@ -124,7 +123,7 @@ access_token: foobar
ip_style: api
compose:
ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first"
-'''
+"""
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
@@ -150,7 +149,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
access_token = self.get_option('access_token')
if self.templar.is_template(access_token):
- access_token = self.templar.template(variable=access_token, disable_lookups=False)
+ access_token = self.templar.template(variable=access_token)
if access_token is None:
raise AnsibleError((
diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py
index 480af8388c..efdca6563e 100644
--- a/plugins/inventory/lxd.py
+++ b/plugins/inventory/lxd.py
@@ -5,108 +5,108 @@
from __future__ import annotations
-DOCUMENTATION = r'''
- name: lxd
- short_description: Returns Ansible inventory from lxd host
+DOCUMENTATION = r"""
+name: lxd
+short_description: Returns Ansible inventory from lxd host
+description:
+ - Get inventory from the lxd.
+ - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
+version_added: "3.0.0"
+author: "Frank Dornheim (@conloos)"
+requirements:
+ - ipaddress
+ - lxd >= 4.0
+options:
+ plugin:
+ description: Token that ensures this is a source file for the 'lxd' plugin.
+ type: string
+ required: true
+ choices: ['community.general.lxd']
+ url:
description:
- - Get inventory from the lxd.
- - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
- version_added: "3.0.0"
- author: "Frank Dornheim (@conloos)"
- requirements:
- - ipaddress
- - lxd >= 4.0
- options:
- plugin:
- description: Token that ensures this is a source file for the 'lxd' plugin.
- type: string
- required: true
- choices: [ 'community.general.lxd' ]
- url:
- description:
- - The unix domain socket path or the https URL for the lxd server.
- - Sockets in filesystem have to start with C(unix:).
- - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket).
- type: string
- default: unix:/var/snap/lxd/common/lxd/unix.socket
- client_key:
- description:
- - The client certificate key file path.
- aliases: [ key_file ]
- default: $HOME/.config/lxc/client.key
- type: path
- client_cert:
- description:
- - The client certificate file path.
- aliases: [ cert_file ]
- default: $HOME/.config/lxc/client.crt
- type: path
- server_cert:
- description:
- - The server certificate file path.
- type: path
- version_added: 8.0.0
- server_check_hostname:
- description:
- - This option controls if the server's hostname is checked as part of the HTTPS connection verification.
- This can be useful to disable, if for example, the server certificate provided (see O(server_cert) option)
- does not cover a name matching the one used to communicate with the server. Such mismatch is common as LXD
- generates self-signed server certificates by default.
- type: bool
- default: true
- version_added: 8.0.0
- trust_password:
- description:
- - The client trusted password.
- - You need to set this password on the lxd server before
- running this module using the following command
- C(lxc config set core.trust_password )
- See U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password).
- - If O(trust_password) is set, this module send a request for authentication before sending any requests.
- type: str
- state:
- description: Filter the instance according to the current status.
- type: str
- default: none
- choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
- project:
- description: Filter the instance according to the given project.
- type: str
- default: default
- version_added: 6.2.0
- type_filter:
- description:
- - Filter the instances by type V(virtual-machine), V(container) or V(both).
- - The first version of the inventory only supported containers.
- type: str
- default: container
- choices: [ 'virtual-machine', 'container', 'both' ]
- version_added: 4.2.0
- prefered_instance_network_interface:
- description:
- - If an instance has multiple network interfaces, select which one is the preferred as pattern.
- - Combined with the first number that can be found e.g. 'eth' + 0.
- - The option has been renamed from O(prefered_container_network_interface) to O(prefered_instance_network_interface)
- in community.general 3.8.0. The old name still works as an alias.
- type: str
- default: eth
- aliases:
- - prefered_container_network_interface
- prefered_instance_network_family:
- description:
- - If an instance has multiple network interfaces, which one is the preferred by family.
- - Specify V(inet) for IPv4 and V(inet6) for IPv6.
- type: str
- default: inet
- choices: [ 'inet', 'inet6' ]
- groupby:
- description:
- - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid).
- - See example for syntax.
- type: dict
-'''
+ - The unix domain socket path or the https URL for the lxd server.
+ - Sockets in filesystem have to start with C(unix:).
+ - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket).
+ type: string
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ client_key:
+ description:
+ - The client certificate key file path.
+ aliases: [key_file]
+ default: $HOME/.config/lxc/client.key
+ type: path
+ client_cert:
+ description:
+ - The client certificate file path.
+ aliases: [cert_file]
+ default: $HOME/.config/lxc/client.crt
+ type: path
+ server_cert:
+ description:
+ - The server certificate file path.
+ type: path
+ version_added: 8.0.0
+ server_check_hostname:
+ description:
+ - This option controls if the server's hostname is checked as part of the HTTPS connection verification. This can be
+ useful to disable, if for example, the server certificate provided (see O(server_cert) option) does not cover a name
+ matching the one used to communicate with the server. Such mismatch is common as LXD generates self-signed server
+ certificates by default.
+ type: bool
+ default: true
+ version_added: 8.0.0
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the lxd server before running this module using the following command C(lxc config
+        set core.trust_password <some random password>). See
+ U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password).
+      - If O(trust_password) is set, this module sends a request for authentication before sending any requests.
+ type: str
+ state:
+ description: Filter the instance according to the current status.
+ type: str
+ default: none
+ choices: ['STOPPED', 'STARTING', 'RUNNING', 'none']
+ project:
+ description: Filter the instance according to the given project.
+ type: str
+ default: default
+ version_added: 6.2.0
+ type_filter:
+ description:
+ - Filter the instances by type V(virtual-machine), V(container) or V(both).
+ - The first version of the inventory only supported containers.
+ type: str
+ default: container
+ choices: ['virtual-machine', 'container', 'both']
+ version_added: 4.2.0
+ prefered_instance_network_interface:
+ description:
+ - If an instance has multiple network interfaces, select which one is the preferred as pattern.
+ - Combined with the first number that can be found, for example C(eth) + C(0).
+ - The option has been renamed from O(prefered_container_network_interface) to O(prefered_instance_network_interface)
+ in community.general 3.8.0. The old name still works as an alias.
+ type: str
+ default: eth
+ aliases:
+ - prefered_container_network_interface
+ prefered_instance_network_family:
+ description:
+ - If an instance has multiple network interfaces, which one is the preferred by family.
+ - Specify V(inet) for IPv4 and V(inet6) for IPv6.
+ type: str
+ default: inet
+ choices: ['inet', 'inet6']
+ groupby:
+ description:
+ - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release),
+ C(type), C(vlanid).
+ - See example for syntax.
+ type: dict
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
---
# simple lxd.yml
plugin: community.general.lxd
@@ -165,7 +165,7 @@ groupby:
projectInternals:
type: project
attribute: internals
-'''
+"""
import json
import re
diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py
index 3bd6e9fda3..3339d66b46 100644
--- a/plugins/inventory/nmap.py
+++ b/plugins/inventory/nmap.py
@@ -5,102 +5,102 @@
from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: nmap
- short_description: Uses nmap to find hosts to target
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: nmap
+short_description: Uses nmap to find hosts to target
+description:
+ - Uses a YAML configuration file with a valid YAML extension.
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+requirements:
+ - nmap CLI installed
+options:
+ plugin:
+ description: Token that ensures this is a source file for the P(community.general.nmap#inventory) plugin.
+ type: string
+ required: true
+ choices: ['nmap', 'community.general.nmap']
+ sudo:
+ description: Set to V(true) to execute a C(sudo nmap) plugin scan.
+ version_added: 4.8.0
+ default: false
+ type: boolean
+ address:
+ description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
+ type: string
+ required: true
+ env:
+ - name: ANSIBLE_NMAP_ADDRESS
+ version_added: 6.6.0
+ exclude:
description:
- - Uses a YAML configuration file with a valid YAML extension.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- requirements:
- - nmap CLI installed
- options:
- plugin:
- description: token that ensures this is a source file for the 'nmap' plugin.
- type: string
- required: true
- choices: ['nmap', 'community.general.nmap']
- sudo:
- description: Set to V(true) to execute a C(sudo nmap) plugin scan.
- version_added: 4.8.0
- default: false
- type: boolean
- address:
- description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation.
- type: string
- required: true
- env:
- - name: ANSIBLE_NMAP_ADDRESS
- version_added: 6.6.0
- exclude:
- description:
- - List of addresses to exclude.
- - For example V(10.2.2.15-25) or V(10.2.2.15,10.2.2.16).
- type: list
- elements: string
- env:
- - name: ANSIBLE_NMAP_EXCLUDE
- version_added: 6.6.0
- port:
- description:
- - Only scan specific port or port range (C(-p)).
- - For example, you could pass V(22) for a single port, V(1-65535) for a range of ports,
- or V(U:53,137,T:21-25,139,8080,S:9) to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all.
- type: string
- version_added: 6.5.0
- ports:
- description: Enable/disable scanning ports.
- type: boolean
- default: true
- ipv4:
- description: use IPv4 type addresses
- type: boolean
- default: true
- ipv6:
- description: use IPv6 type addresses
- type: boolean
- default: true
- udp_scan:
- description:
- - Scan via UDP.
- - Depending on your system you might need O(sudo=true) for this to work.
- type: boolean
- default: false
- version_added: 6.1.0
- icmp_timestamp:
- description:
- - Scan via ICMP Timestamp (C(-PP)).
- - Depending on your system you might need O(sudo=true) for this to work.
- type: boolean
- default: false
- version_added: 6.1.0
- open:
- description: Only scan for open (or possibly open) ports.
- type: boolean
- default: false
- version_added: 6.5.0
- dns_resolve:
- description: Whether to always (V(true)) or never (V(false)) do DNS resolution.
- type: boolean
- default: false
- version_added: 6.1.0
- dns_servers:
- description: Specify which DNS servers to use for name resolution.
- type: list
- elements: string
- version_added: 10.5.0
- use_arp_ping:
- description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method.
- type: boolean
- default: true
- version_added: 7.4.0
- notes:
- - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false).
- - 'TODO: add OS fingerprinting'
-'''
-EXAMPLES = '''
+ - List of addresses to exclude.
+ - For example V(10.2.2.15-25) or V(10.2.2.15,10.2.2.16).
+ type: list
+ elements: string
+ env:
+ - name: ANSIBLE_NMAP_EXCLUDE
+ version_added: 6.6.0
+ port:
+ description:
+ - Only scan specific port or port range (C(-p)).
+ - For example, you could pass V(22) for a single port, V(1-65535) for a range of ports, or V(U:53,137,T:21-25,139,8080,S:9)
+ to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all.
+ type: string
+ version_added: 6.5.0
+ ports:
+ description: Enable/disable scanning ports.
+ type: boolean
+ default: true
+ ipv4:
+ description: Use IPv4 type addresses.
+ type: boolean
+ default: true
+ ipv6:
+ description: Use IPv6 type addresses.
+ type: boolean
+ default: true
+ udp_scan:
+ description:
+ - Scan using UDP.
+ - Depending on your system you might need O(sudo=true) for this to work.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ icmp_timestamp:
+ description:
+ - Scan using ICMP Timestamp (C(-PP)).
+ - Depending on your system you might need O(sudo=true) for this to work.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ open:
+ description: Only scan for open (or possibly open) ports.
+ type: boolean
+ default: false
+ version_added: 6.5.0
+ dns_resolve:
+ description: Whether to always (V(true)) or never (V(false)) do DNS resolution.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ dns_servers:
+ description: Specify which DNS servers to use for name resolution.
+ type: list
+ elements: string
+ version_added: 10.5.0
+ use_arp_ping:
+ description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method.
+ type: boolean
+ default: true
+ version_added: 7.4.0
+notes:
+ - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false).
+ - 'TODO: add OS fingerprinting.'
+"""
+EXAMPLES = r"""
---
# inventory.config file in YAML format
plugin: community.general.nmap
@@ -122,7 +122,7 @@ exclude: 192.168.0.1, web.example.com
port: 22, 443
groups:
web_servers: "ports | selectattr('port', 'equalto', '443')"
-'''
+"""
import os
import re
diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py
index 9e29c91e54..8b4821a009 100644
--- a/plugins/inventory/online.py
+++ b/plugins/inventory/online.py
@@ -5,49 +5,49 @@
from __future__ import annotations
-DOCUMENTATION = r'''
- name: online
- author:
- - Remy Leone (@remyleone)
- short_description: Scaleway (previously Online SAS or Online.net) inventory source
- description:
- - Get inventory hosts from Scaleway (previously Online SAS or Online.net).
- options:
- plugin:
- description: token that ensures this is a source file for the 'online' plugin.
- type: string
- required: true
- choices: ['online', 'community.general.online']
- oauth_token:
- required: true
- description: Online OAuth token.
- type: string
- env:
- # in order of precedence
- - name: ONLINE_TOKEN
- - name: ONLINE_API_KEY
- - name: ONLINE_OAUTH_TOKEN
- hostnames:
- description: List of preference about what to use as an hostname.
- type: list
- elements: string
- default:
- - public_ipv4
- choices:
- - public_ipv4
- - private_ipv4
- - hostname
- groups:
- description: List of groups.
- type: list
- elements: string
- choices:
- - location
- - offer
- - rpn
-'''
+DOCUMENTATION = r"""
+name: online
+author:
+ - Remy Leone (@remyleone)
+short_description: Scaleway (previously Online SAS or Online.net) inventory source
+description:
+ - Get inventory hosts from Scaleway (previously Online SAS or Online.net).
+options:
+ plugin:
+ description: Token that ensures this is a source file for the P(community.general.online#inventory) plugin.
+ type: string
+ required: true
+ choices: ['online', 'community.general.online']
+ oauth_token:
+ required: true
+ description: Online OAuth token.
+ type: string
+ env:
+ # in order of precedence
+ - name: ONLINE_TOKEN
+ - name: ONLINE_API_KEY
+ - name: ONLINE_OAUTH_TOKEN
+ hostnames:
+ description: List of preferences about what to use as a hostname.
+ type: list
+ elements: string
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - hostname
+ groups:
+ description: List of groups.
+ type: list
+ elements: string
+ choices:
+ - location
+ - offer
+ - rpn
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# online_inventory.yml file in YAML format
# Example command line: ansible-inventory --list -i online_inventory.yml
@@ -58,7 +58,7 @@ groups:
- location
- offer
- rpn
-'''
+"""
import json
from sys import version as python_version
diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py
index ed26880d07..8ced301dd1 100644
--- a/plugins/inventory/opennebula.py
+++ b/plugins/inventory/opennebula.py
@@ -6,77 +6,75 @@
from __future__ import annotations
-DOCUMENTATION = r'''
- name: opennebula
- author:
- - Kristian Feldsam (@feldsam)
- short_description: OpenNebula inventory source
- version_added: "3.8.0"
- extends_documentation_fragment:
- - constructed
+DOCUMENTATION = r"""
+name: opennebula
+author:
+ - Kristian Feldsam (@feldsam)
+short_description: OpenNebula inventory source
+version_added: "3.8.0"
+extends_documentation_fragment:
+ - constructed
+description:
+ - Get inventory hosts from OpenNebula cloud.
+ - Uses a YAML configuration file ending with either C(opennebula.yml) or C(opennebula.yaml) to set parameter values.
+ - Uses O(api_authfile), C(~/.one/one_auth), or E(ONE_AUTH) pointing to an OpenNebula credentials file.
+options:
+ plugin:
+ description: Token that ensures this is a source file for the P(community.general.opennebula#inventory) plugin.
+ type: string
+ required: true
+ choices: [community.general.opennebula]
+ api_url:
description:
- - Get inventory hosts from OpenNebula cloud.
- - Uses an YAML configuration file ending with either C(opennebula.yml) or C(opennebula.yaml)
- to set parameter values.
- - Uses O(api_authfile), C(~/.one/one_auth), or E(ONE_AUTH) pointing to a OpenNebula credentials file.
- options:
- plugin:
- description: Token that ensures this is a source file for the 'opennebula' plugin.
- type: string
- required: true
- choices: [ community.general.opennebula ]
- api_url:
- description:
- - URL of the OpenNebula RPC server.
- - It is recommended to use HTTPS so that the username/password are not
- transferred over the network unencrypted.
- - If not set then the value of the E(ONE_URL) environment variable is used.
- env:
- - name: ONE_URL
- required: true
- type: string
- api_username:
- description:
- - Name of the user to login into the OpenNebula RPC server. If not set
- then the value of the E(ONE_USERNAME) environment variable is used.
- env:
- - name: ONE_USERNAME
- type: string
- api_password:
- description:
- - Password or a token of the user to login into OpenNebula RPC server.
- - If not set, the value of the E(ONE_PASSWORD) environment variable is used.
- env:
- - name: ONE_PASSWORD
- required: false
- type: string
- api_authfile:
- description:
- - If both O(api_username) or O(api_password) are not set, then it will try
- authenticate with ONE auth file. Default path is C(~/.one/one_auth).
- - Set environment variable E(ONE_AUTH) to override this path.
- env:
- - name: ONE_AUTH
- required: false
- type: string
- hostname:
- description: Field to match the hostname. Note V(v4_first_ip) corresponds to the first IPv4 found on VM.
- type: string
- default: v4_first_ip
- choices:
- - v4_first_ip
- - v6_first_ip
- - name
- filter_by_label:
- description: Only return servers filtered by this label.
- type: string
- group_by_labels:
- description: Create host groups by vm labels
- type: bool
- default: true
-'''
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+ - If not set then the value of the E(ONE_URL) environment variable is used.
+ env:
+ - name: ONE_URL
+ required: true
+ type: string
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula RPC server. If not set then the value of the E(ONE_USERNAME) environment
+ variable is used.
+ env:
+ - name: ONE_USERNAME
+ type: string
+ api_password:
+ description:
+ - Password or a token of the user to login into OpenNebula RPC server.
+ - If not set, the value of the E(ONE_PASSWORD) environment variable is used.
+ env:
+ - name: ONE_PASSWORD
+ required: false
+ type: string
+ api_authfile:
+ description:
+ - If neither O(api_username) nor O(api_password) is set, then it tries to authenticate with ONE auth file. Default
+ path is C(~/.one/one_auth).
+ - Set environment variable E(ONE_AUTH) to override this path.
+ env:
+ - name: ONE_AUTH
+ required: false
+ type: string
+ hostname:
+ description: Field to match the hostname. Note V(v4_first_ip) corresponds to the first IPv4 found on VM.
+ type: string
+ default: v4_first_ip
+ choices:
+ - v4_first_ip
+ - v6_first_ip
+ - name
+ filter_by_label:
+ description: Only return servers filtered by this label.
+ type: string
+ group_by_labels:
+ description: Create host groups by VM labels.
+ type: bool
+ default: true
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# inventory_opennebula.yml file in YAML format
# Example command line: ansible-inventory --list -i inventory_opennebula.yml
@@ -84,7 +82,7 @@ EXAMPLES = r'''
plugin: community.general.opennebula
api_url: https://opennebula:2633/RPC2
filter_by_label: Cache
-'''
+"""
try:
import pyone
diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py
deleted file mode 100644
index 0f41cabe7d..0000000000
--- a/plugins/inventory/proxmox.py
+++ /dev/null
@@ -1,712 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2016 Guido Günther , Daniel Lobato Garcia
-# Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import annotations
-
-
-DOCUMENTATION = '''
- name: proxmox
- short_description: Proxmox inventory source
- version_added: "1.2.0"
- author:
- - Jeffrey van Pelt (@Thulium-Drake)
- requirements:
- - requests >= 1.1
- description:
- - Get inventory hosts from a Proxmox PVE cluster.
- - "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)"
- - Will retrieve the first network interface with an IP for Proxmox nodes.
- - Can retrieve LXC/QEMU configuration as facts.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- plugin:
- description: The name of this plugin, it should always be set to V(community.general.proxmox) for this plugin to recognize it as its own.
- required: true
- choices: ['community.general.proxmox']
- type: str
- url:
- description:
- - URL to Proxmox cluster.
- - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_URL) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the O(url).
- default: 'http://localhost:8006'
- type: str
- env:
- - name: PROXMOX_URL
- version_added: 2.0.0
- user:
- description:
- - Proxmox authentication user.
- - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_USER) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the O(user).
- required: true
- type: str
- env:
- - name: PROXMOX_USER
- version_added: 2.0.0
- password:
- description:
- - Proxmox authentication password.
- - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_PASSWORD) will be used instead.
- - Since community.general 4.7.0 you can also use templating to specify the value of the O(password).
- - If you do not specify a password, you must set O(token_id) and O(token_secret) instead.
- type: str
- env:
- - name: PROXMOX_PASSWORD
- version_added: 2.0.0
- token_id:
- description:
- - Proxmox authentication token ID.
- - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_TOKEN_ID) will be used instead.
- - To use token authentication, you must also specify O(token_secret). If you do not specify O(token_id) and O(token_secret),
- you must set a password instead.
- - Make sure to grant explicit pve permissions to the token or disable 'privilege separation' to use the users' privileges instead.
- version_added: 4.8.0
- type: str
- env:
- - name: PROXMOX_TOKEN_ID
- token_secret:
- description:
- - Proxmox authentication token secret.
- - If the value is not specified in the inventory configuration, the value of environment variable E(PROXMOX_TOKEN_SECRET) will be used instead.
- - To use token authentication, you must also specify O(token_id). If you do not specify O(token_id) and O(token_secret),
- you must set a password instead.
- version_added: 4.8.0
- type: str
- env:
- - name: PROXMOX_TOKEN_SECRET
- validate_certs:
- description: Verify SSL certificate if using HTTPS.
- type: boolean
- default: true
- group_prefix:
- description: Prefix to apply to Proxmox groups.
- default: proxmox_
- type: str
- facts_prefix:
- description: Prefix to apply to LXC/QEMU config facts.
- default: proxmox_
- type: str
- want_facts:
- description:
- - Gather LXC/QEMU configuration facts.
- - When O(want_facts) is set to V(true) more details about QEMU VM status are possible, besides the running and stopped states.
- Currently if the VM is running and it is suspended, the status will be running and the machine will be in C(running) group,
- but its actual state will be paused. See O(qemu_extended_statuses) for how to retrieve the real status.
- default: false
- type: bool
- qemu_extended_statuses:
- description:
- - Requires O(want_facts) to be set to V(true) to function. This will allow you to differentiate between C(paused) and C(prelaunch)
- statuses of the QEMU VMs.
- - This introduces multiple groups [prefixed with O(group_prefix)] C(prelaunch) and C(paused).
- default: false
- type: bool
- version_added: 5.1.0
- want_proxmox_nodes_ansible_host:
- version_added: 3.0.0
- description:
- - Whether to set C(ansible_host) for proxmox nodes.
- - When set to V(true) (default), will use the first available interface. This can be different from what you expect.
- - The default of this option changed from V(true) to V(false) in community.general 6.0.0.
- type: bool
- default: false
- exclude_nodes:
- description: Exclude proxmox nodes and the nodes-group from the inventory output.
- type: bool
- default: false
- version_added: 8.1.0
- filters:
- version_added: 4.6.0
- description: A list of Jinja templates that allow filtering hosts.
- type: list
- elements: str
- default: []
- strict:
- version_added: 2.5.0
- compose:
- version_added: 2.5.0
- groups:
- version_added: 2.5.0
- keyed_groups:
- version_added: 2.5.0
-'''
-
-EXAMPLES = '''
----
-# Minimal example which will not gather additional facts for QEMU/LXC guests
-# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006
-# my.proxmox.yml
-plugin: community.general.proxmox
-user: ansible@pve
-password: secure
-# Note that this can easily give you wrong values as ansible_host. See further below for
-# an example where this is set to `false` and where ansible_host is set with `compose`.
-want_proxmox_nodes_ansible_host: true
-
----
-# Instead of login with password, proxmox supports api token authentication since release 6.2.
-plugin: community.general.proxmox
-user: ci@pve
-token_id: gitlab-1
-token_secret: fa256e9c-26ab-41ec-82da-707a2c079829
-
-# The secret can also be a vault string or passed via the environment variable TOKEN_SECRET.
-token_secret: !vault |
- $ANSIBLE_VAULT;1.1;AES256
- 62353634333163633336343265623632626339313032653563653165313262343931643431656138
- 6134333736323265656466646539663134306166666237630a653363623262636663333762316136
- 34616361326263383766366663393837626437316462313332663736623066656237386531663731
- 3037646432383064630a663165303564623338666131353366373630656661333437393937343331
- 32643131386134396336623736393634373936356332623632306561356361323737313663633633
- 6231313333666361656537343562333337323030623732323833
-
----
-# More complete example demonstrating the use of 'want_facts' and the constructed options
-# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true'
-# my.proxmox.yml
-plugin: community.general.proxmox
-url: http://pve.domain.com:8006
-user: ansible@pve
-password: secure
-want_facts: true
-keyed_groups:
- # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true'
- - key: proxmox_tags_parsed
- separator: ""
- prefix: group
-groups:
- webservers: "'web' in (proxmox_tags_parsed|list)"
- mailservers: "'mail' in (proxmox_tags_parsed|list)"
-compose:
- ansible_port: 2222
-# Note that this can easily give you wrong values as ansible_host. See further below for
-# an example where this is set to `false` and where ansible_host is set with `compose`.
-want_proxmox_nodes_ansible_host: true
-
----
-# Using the inventory to allow ansible to connect via the first IP address of the VM / Container
-# (Default is connection by name of QEMU/LXC guests)
-# Note: my_inv_var demonstrates how to add a string variable to every host used by the inventory.
-# my.proxmox.yml
-plugin: community.general.proxmox
-url: http://192.168.1.2:8006
-user: ansible@pve
-password: secure
-validate_certs: false # only do this when you trust the network!
-want_facts: true
-want_proxmox_nodes_ansible_host: false
-compose:
- ansible_host: proxmox_ipconfig0.ip | default(proxmox_net0.ip) | ipaddr('address')
- my_inv_var_1: "'my_var1_value'"
- my_inv_var_2: >
- "my_var_2_value"
-
----
-# Specify the url, user and password using templating
-# my.proxmox.yml
-plugin: community.general.proxmox
-url: "{{ lookup('ansible.builtin.ini', 'url', section='proxmox', file='file.ini') }}"
-user: "{{ lookup('ansible.builtin.env','PM_USER') | default('ansible@pve') }}"
-password: "{{ lookup('community.general.random_string', base64=True) }}"
-# Note that this can easily give you wrong values as ansible_host. See further up for
-# an example where this is set to `false` and where ansible_host is set with `compose`.
-want_proxmox_nodes_ansible_host: true
-
-'''
-
-import itertools
-import re
-
-from ansible.module_utils.common._collections_compat import MutableMapping
-
-from ansible.errors import AnsibleError
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.module_utils.six import string_types
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.utils.display import Display
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
-
-# 3rd party imports
-try:
- import requests
- if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
- raise ImportError
- HAS_REQUESTS = True
-except ImportError:
- HAS_REQUESTS = False
-
-display = Display()
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
- ''' Host inventory parser for ansible using Proxmox as source. '''
-
- NAME = 'community.general.proxmox'
-
- def __init__(self):
-
- super(InventoryModule, self).__init__()
-
- # from config
- self.proxmox_url = None
-
- self.session = None
- self.cache_key = None
- self.use_cache = None
-
- def verify_file(self, path):
-
- valid = False
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('proxmox.yaml', 'proxmox.yml')):
- valid = True
- else:
- self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" nor "proxmox.yml"')
- return valid
-
- def _get_session(self):
- if not self.session:
- self.session = requests.session()
- self.session.verify = self.get_option('validate_certs')
- return self.session
-
- def _get_auth(self):
- validate_certs = self.get_option('validate_certs')
-
- if validate_certs is False:
- from requests.packages.urllib3 import disable_warnings
- disable_warnings()
-
- if self.proxmox_password:
- credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password})
- a = self._get_session()
- ret = a.post(f'{self.proxmox_url}/api2/json/access/ticket', data=credentials)
- json = ret.json()
- self.headers = {
- # only required for POST/PUT/DELETE methods, which we are not using currently
- # 'CSRFPreventionToken': json['data']['CSRFPreventionToken'],
- 'Cookie': f"PVEAuthCookie={json['data']['ticket']}"
- }
- else:
- # Clean and format token components
- user = self.proxmox_user.strip()
- token_id = self.proxmox_token_id.strip()
- token_secret = self.proxmox_token_secret.strip()
-
- # Build token string without newlines
- token = f'{user}!{token_id}={token_secret}'
-
- # Set headers with clean token
- self.headers = {'Authorization': f'PVEAPIToken={token}'}
-
- def _get_json(self, url, ignore_errors=None):
-
- data = []
- has_data = False
-
- if self.use_cache:
- try:
- data = self._cache[self.cache_key][url]
- has_data = True
- except KeyError:
- self.update_cache = True
-
- if not has_data:
- s = self._get_session()
- while True:
- ret = s.get(url, headers=self.headers)
- if ignore_errors and ret.status_code in ignore_errors:
- break
- ret.raise_for_status()
- json = ret.json()
-
- # process results
- # FIXME: This assumes 'return type' matches a specific query,
- # it will break if we expand the queries and they dont have different types
- if 'data' not in json:
- # /hosts/:id does not have a 'data' key
- data = json
- break
- elif isinstance(json['data'], MutableMapping):
- # /facts are returned as dict in 'data'
- data = json['data']
- break
- else:
- if json['data']:
- # /hosts 's 'results' is a list of all hosts, returned is paginated
- data = data + json['data']
- break
-
- self._results[url] = data
- return make_unsafe(data)
-
- def _get_nodes(self):
- return self._get_json(f"{self.proxmox_url}/api2/json/nodes")
-
- def _get_pools(self):
- return self._get_json(f"{self.proxmox_url}/api2/json/pools")
-
- def _get_lxc_per_node(self, node):
- return self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/lxc")
-
- def _get_qemu_per_node(self, node):
- return self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/qemu")
-
- def _get_members_per_pool(self, pool):
- ret = self._get_json(f"{self.proxmox_url}/api2/json/pools/{pool}")
- return ret['members']
-
- def _get_node_ip(self, node):
- ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/network")
-
- # sort interface by iface name to make selection as stable as possible
- ret.sort(key=lambda x: x['iface'])
-
- for iface in ret:
- try:
- # only process interfaces adhering to these rules
- if 'active' not in iface:
- self.display.vvv(f"Interface {iface['iface']} on node {node} does not have an active state")
- continue
- if 'address' not in iface:
- self.display.vvv(f"Interface {iface['iface']} on node {node} does not have an address")
- continue
- if 'gateway' not in iface:
- self.display.vvv(f"Interface {iface['iface']} on node {node} does not have a gateway")
- continue
- self.display.vv(f"Using interface {iface['iface']} on node {node} with address {iface['address']} as node ip for ansible_host")
- return iface['address']
- except Exception:
- continue
- return None
-
- def _get_lxc_interfaces(self, properties, node, vmid):
- status_key = self._fact('status')
-
- if status_key not in properties or not properties[status_key] == 'running':
- return
-
- ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/lxc/{vmid}/interfaces", ignore_errors=[501])
- if not ret:
- return
-
- result = []
-
- for iface in ret:
- result_iface = {
- 'name': iface['name'],
- 'hwaddr': iface['hwaddr']
- }
-
- if 'inet' in iface:
- result_iface['inet'] = iface['inet']
-
- if 'inet6' in iface:
- result_iface['inet6'] = iface['inet6']
-
- result.append(result_iface)
-
- properties[self._fact('lxc_interfaces')] = result
-
- def _get_agent_network_interfaces(self, node, vmid, vmtype):
- result = []
-
- try:
- ifaces = self._get_json(
- f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/agent/network-get-interfaces"
- )['result']
-
- if "error" in ifaces:
- if "class" in ifaces["error"]:
- # This happens on Windows, even though qemu agent is running, the IP address
- # cannot be fetched, as it is unsupported, also a command disabled can happen.
- errorClass = ifaces["error"]["class"]
- if errorClass in ["Unsupported"]:
- self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported")
- elif errorClass in ["CommandDisabled"]:
- self.display.v("Retrieving network interfaces from guest agents has been disabled")
- return result
-
- for iface in ifaces:
- result.append({
- 'name': iface['name'],
- 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '',
- 'ip-addresses': [f"{ip['ip-address']}/{ip['prefix']}" for ip in iface['ip-addresses']] if 'ip-addresses' in iface else []
- })
- except requests.HTTPError:
- pass
-
- return result
-
- def _get_vm_config(self, properties, node, vmid, vmtype, name):
- ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/config")
-
- properties[self._fact('node')] = node
- properties[self._fact('vmid')] = vmid
- properties[self._fact('vmtype')] = vmtype
-
- plaintext_configs = [
- 'description',
- ]
-
- for config in ret:
- key = self._fact(config)
- value = ret[config]
- try:
- # fixup disk images as they have no key
- if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
- value = f"disk_image={value}"
-
- # Additional field containing parsed tags as list
- if config == 'tags':
- stripped_value = value.strip()
- if stripped_value:
- parsed_key = f"{key}_parsed"
- properties[parsed_key] = [tag.strip() for tag in stripped_value.replace(',', ';').split(";")]
-
- # The first field in the agent string tells you whether the agent is enabled
- # the rest of the comma separated string is extra config for the agent.
- # In some (newer versions of proxmox) instances it can be 'enabled=1'.
- if config == 'agent':
- agent_enabled = 0
- try:
- agent_enabled = int(value.split(',')[0])
- except ValueError:
- if value.split(',')[0] == "enabled=1":
- agent_enabled = 1
- if agent_enabled:
- agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype)
- if agent_iface_value:
- agent_iface_key = self.to_safe(f'{key}_interfaces')
- properties[agent_iface_key] = agent_iface_value
-
- if config == 'lxc':
- out_val = {}
- for k, v in value:
- if k.startswith('lxc.'):
- k = k[len('lxc.'):]
- out_val[k] = v
- value = out_val
-
- if config not in plaintext_configs and isinstance(value, string_types) \
- and all("=" in v for v in value.split(",")):
- # split off strings with commas to a dict
- # skip over any keys that cannot be processed
- try:
- value = dict(key.split("=", 1) for key in value.split(","))
- except Exception:
- continue
-
- properties[key] = value
- except NameError:
- return None
-
- def _get_vm_status(self, properties, node, vmid, vmtype, name):
- ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/status/current")
- properties[self._fact('status')] = ret['status']
- if vmtype == 'qemu':
- properties[self._fact('qmpstatus')] = ret['qmpstatus']
-
- def _get_vm_snapshots(self, properties, node, vmid, vmtype, name):
- ret = self._get_json(f"{self.proxmox_url}/api2/json/nodes/{node}/{vmtype}/{vmid}/snapshot")
- snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current']
- properties[self._fact('snapshots')] = snapshots
-
- def to_safe(self, word):
- '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
- #> ProxmoxInventory.to_safe("foo-bar baz")
- 'foo_barbaz'
- '''
- regex = r"[^A-Za-z0-9\_]"
- return re.sub(regex, "_", word.replace(" ", ""))
-
- def _fact(self, name):
- '''Generate a fact's full name from the common prefix and a name.'''
- return self.to_safe(f'{self.facts_prefix}{name.lower()}')
-
- def _group(self, name):
- '''Generate a group's full name from the common prefix and a name.'''
- return self.to_safe(f'{self.group_prefix}{name.lower()}')
-
- def _can_add_host(self, name, properties):
- '''Ensure that a host satisfies all defined hosts filters. If strict mode is
- enabled, any error during host filter compositing will lead to an AnsibleError
- being raised, otherwise the filter will be ignored.
- '''
- for host_filter in self.host_filters:
- try:
- if not self._compose(host_filter, properties):
- return False
- except Exception as e: # pylint: disable=broad-except
- message = f"Could not evaluate host filter {host_filter} for host {name} - {e}"
- if self.strict:
- raise AnsibleError(message)
- display.warning(message)
- return True
-
- def _add_host(self, name, variables):
- self.inventory.add_host(name)
- for k, v in variables.items():
- self.inventory.set_variable(name, k, v)
- variables = self.inventory.get_host(name).get_vars()
- self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict)
- self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict)
- self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict)
-
- def _handle_item(self, node, ittype, item):
- '''Handle an item from the list of LXC containers and Qemu VM. The
- return value will be either None if the item was skipped or the name of
- the item if it was added to the inventory.'''
- if item.get('template'):
- return None
-
- properties = dict()
- name, vmid = item['name'], item['vmid']
-
- # get status, config and snapshots if want_facts == True
- want_facts = self.get_option('want_facts')
- if want_facts:
- self._get_vm_status(properties, node, vmid, ittype, name)
- self._get_vm_config(properties, node, vmid, ittype, name)
- self._get_vm_snapshots(properties, node, vmid, ittype, name)
-
- if ittype == 'lxc':
- self._get_lxc_interfaces(properties, node, vmid)
-
- # ensure the host satisfies filters
- if not self._can_add_host(name, properties):
- return None
-
- # add the host to the inventory
- self._add_host(name, properties)
- node_type_group = self._group(f'{node}_{ittype}')
- self.inventory.add_child(self._group(f"all_{ittype}"), name)
- self.inventory.add_child(node_type_group, name)
-
- item_status = item['status']
- if item_status == 'running':
- if want_facts and ittype == 'qemu' and self.get_option('qemu_extended_statuses'):
- # get more details about the status of the qemu VM
- item_status = properties.get(self._fact('qmpstatus'), item_status)
- self.inventory.add_child(self._group(f'all_{item_status}'), name)
-
- return name
-
- def _populate_pool_groups(self, added_hosts):
- '''Generate groups from Proxmox resource pools, ignoring VMs and
- containers that were skipped.'''
- for pool in self._get_pools():
- poolid = pool.get('poolid')
- if not poolid:
- continue
- pool_group = self._group(f"pool_{poolid}")
- self.inventory.add_group(pool_group)
-
- for member in self._get_members_per_pool(poolid):
- name = member.get('name')
- if name and name in added_hosts:
- self.inventory.add_child(pool_group, name)
-
- def _populate(self):
-
- # create common groups
- default_groups = ['lxc', 'qemu', 'running', 'stopped']
-
- if self.get_option('qemu_extended_statuses'):
- default_groups.extend(['prelaunch', 'paused'])
-
- for group in default_groups:
- self.inventory.add_group(self._group(f'all_{group}'))
- nodes_group = self._group('nodes')
- if not self.exclude_nodes:
- self.inventory.add_group(nodes_group)
-
- want_proxmox_nodes_ansible_host = self.get_option("want_proxmox_nodes_ansible_host")
-
- # gather vm's on nodes
- self._get_auth()
- hosts = []
- for node in self._get_nodes():
- if not node.get('node'):
- continue
- if not self.exclude_nodes:
- self.inventory.add_host(node['node'])
- if node['type'] == 'node' and not self.exclude_nodes:
- self.inventory.add_child(nodes_group, node['node'])
-
- if node['status'] == 'offline':
- continue
-
- # get node IP address
- if want_proxmox_nodes_ansible_host and not self.exclude_nodes:
- ip = self._get_node_ip(node['node'])
- self.inventory.set_variable(node['node'], 'ansible_host', ip)
-
- # Setting composite variables
- if not self.exclude_nodes:
- variables = self.inventory.get_host(node['node']).get_vars()
- self._set_composite_vars(self.get_option('compose'), variables, node['node'], strict=self.strict)
-
- # add LXC/Qemu groups for the node
- for ittype in ('lxc', 'qemu'):
- node_type_group = self._group(f"{node['node']}_{ittype}")
- self.inventory.add_group(node_type_group)
-
- # get LXC containers and Qemu VMs for this node
- lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node']))
- qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node']))
- for ittype, item in itertools.chain(lxc_objects, qemu_objects):
- name = self._handle_item(node['node'], ittype, item)
- if name is not None:
- hosts.append(name)
-
- # gather vm's in pools
- self._populate_pool_groups(hosts)
-
- def parse(self, inventory, loader, path, cache=True):
- if not HAS_REQUESTS:
- raise AnsibleError('This module requires Python Requests 1.1.0 or higher: '
- 'https://github.com/psf/requests.')
-
- super(InventoryModule, self).parse(inventory, loader, path)
-
- # read config from file, this sets 'options'
- self._read_config_data(path)
-
- # read and template auth options
- for o in ('url', 'user', 'password', 'token_id', 'token_secret'):
- v = self.get_option(o)
- if self.templar.is_template(v):
- v = self.templar.template(v, disable_lookups=False)
- setattr(self, f'proxmox_{o}', v)
-
- # some more cleanup and validation
- self.proxmox_url = self.proxmox_url.rstrip('/')
-
- if self.proxmox_password is None and (self.proxmox_token_id is None or self.proxmox_token_secret is None):
- raise AnsibleError('You must specify either a password or both token_id and token_secret.')
-
- if self.get_option('qemu_extended_statuses') and not self.get_option('want_facts'):
- raise AnsibleError('You must set want_facts to True if you want to use qemu_extended_statuses.')
- # read rest of options
- self.exclude_nodes = self.get_option('exclude_nodes')
- self.cache_key = self.get_cache_key(path)
- self.use_cache = cache and self.get_option('cache')
- self.update_cache = not cache and self.get_option('cache')
- self.host_filters = self.get_option('filters')
- self.group_prefix = self.get_option('group_prefix')
- self.facts_prefix = self.get_option('facts_prefix')
- self.strict = self.get_option('strict')
-
- # actually populate inventory
- self._results = {}
- self._populate()
- if self.update_cache:
- self._cache[self.cache_key] = self._results
diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py
index d815890df4..c730049833 100644
--- a/plugins/inventory/scaleway.py
+++ b/plugins/inventory/scaleway.py
@@ -6,73 +6,72 @@
from __future__ import annotations
-DOCUMENTATION = r'''
- name: scaleway
- author:
- - Remy Leone (@remyleone)
- short_description: Scaleway inventory source
+DOCUMENTATION = r"""
+name: scaleway
+author:
+ - Remy Leone (@remyleone)
+short_description: Scaleway inventory source
+description:
+ - Get inventory hosts from Scaleway.
+requirements:
+ - PyYAML
+options:
+ plugin:
+ description: Token that ensures this is a source file for the 'scaleway' plugin.
+ required: true
+ type: string
+ choices: ['scaleway', 'community.general.scaleway']
+ regions:
+ description: Filter results on a specific Scaleway region.
+ type: list
+ elements: string
+ default:
+ - ams1
+ - par1
+ - par2
+ - waw1
+ tags:
+ description: Filter results on a specific tag.
+ type: list
+ elements: string
+ scw_profile:
description:
- - Get inventory hosts from Scaleway.
- requirements:
- - PyYAML
- options:
- plugin:
- description: Token that ensures this is a source file for the 'scaleway' plugin.
- required: true
- type: string
- choices: ['scaleway', 'community.general.scaleway']
- regions:
- description: Filter results on a specific Scaleway region.
- type: list
- elements: string
- default:
- - ams1
- - par1
- - par2
- - waw1
- tags:
- description: Filter results on a specific tag.
- type: list
- elements: string
- scw_profile:
- description:
- - The config profile to use in config file.
- - By default uses the one specified as C(active_profile) in the config file, or falls back to V(default) if that is not defined.
- type: string
- version_added: 4.4.0
- oauth_token:
- description:
- - Scaleway OAuth token.
- - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file
- (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
- - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
- type: string
- env:
- # in order of precedence
- - name: SCW_TOKEN
- - name: SCW_API_KEY
- - name: SCW_OAUTH_TOKEN
- hostnames:
- description: List of preference about what to use as an hostname.
- type: list
- elements: string
- default:
- - public_ipv4
- choices:
- - public_ipv4
- - private_ipv4
- - public_ipv6
- - hostname
- - id
- variables:
- description: 'Set individual variables: keys are variable names and
- values are templates. Any value returned by the
- L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
- can be used.'
- type: dict
-'''
+ - The config profile to use in config file.
+ - By default uses the one specified as C(active_profile) in the config file, or falls back to V(default) if that is
+ not defined.
+ type: string
+ version_added: 4.4.0
+ oauth_token:
+ description:
+ - Scaleway OAuth token.
+ - If not explicitly defined or in environment variables, it tries to look up in the C(scaleway-cli) configuration file
+ (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
+ - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
+ type: string
+ env:
+ # in order of precedence
+ - name: SCW_TOKEN
+ - name: SCW_API_KEY
+ - name: SCW_OAUTH_TOKEN
+ hostnames:
+ description: List of preferences about what to use as a hostname.
+ type: list
+ elements: string
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - public_ipv6
+ - hostname
+ - id
+ variables:
+ description: 'Set individual variables: keys are variable names and values are templates. Any value returned by the L(Scaleway
+ API, https://developer.scaleway.com/#servers-server-get) can be used.'
+ type: dict
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# scaleway_inventory.yml file in YAML format
# Example command line: ansible-inventory --list -i scaleway_inventory.yml
@@ -110,7 +109,7 @@ variables:
ansible_host: public_ip.address
ansible_connection: "'ssh'"
ansible_user: "'admin'"
-'''
+"""
import os
import json
diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py
deleted file mode 100644
index e219f92641..0000000000
--- a/plugins/inventory/stackpath_compute.py
+++ /dev/null
@@ -1,285 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2020 Shay Rybak
-# Copyright (c) 2020 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import annotations
-
-DOCUMENTATION = r"""
-name: stackpath_compute
-short_description: StackPath Edge Computing inventory source
-version_added: 1.2.0
-author:
- - UNKNOWN (@shayrybak)
-deprecated:
- removed_in: 11.0.0
- why: Stackpath (the company) ceased its operations in June 2024. The API URL this plugin relies on is not found in DNS.
- alternative: There is none.
-extends_documentation_fragment:
- - inventory_cache
- - constructed
-description:
- - Get inventory hosts from StackPath Edge Computing.
- - Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml).
-options:
- plugin:
- description:
- - A token that ensures this is a source file for the plugin.
- required: true
- type: string
- choices: ['community.general.stackpath_compute']
- client_id:
- description:
- - An OAuth client ID generated from the API Management section of the StackPath customer portal U(https://control.stackpath.net/api-management).
- required: true
- type: str
- client_secret:
- description:
- - An OAuth client secret generated from the API Management section of the StackPath customer portal U(https://control.stackpath.net/api-management).
- required: true
- type: str
- stack_slugs:
- description:
- - A list of Stack slugs to query instances in. If no entry then get instances in all stacks on the account.
- type: list
- elements: str
- use_internal_ip:
- description:
- - Whether or not to use internal IP addresses, If false, uses external IP addresses, internal otherwise.
- - If an instance doesn't have an external IP it will not be returned when this option is set to false.
- type: bool
-"""
-
-EXAMPLES = r"""
-plugin: community.general.stackpath_compute
-client_id: my_client_id
-client_secret: my_client_secret
-stack_slugs:
- - my_first_stack_slug
- - my_other_stack_slug
-use_internal_ip: false
-"""
-
-import traceback
-import json
-
-from ansible.errors import AnsibleError
-from ansible.module_utils.urls import open_url
-from ansible.plugins.inventory import (
- BaseInventoryPlugin,
- Constructable,
- Cacheable
-)
-from ansible.utils.display import Display
-
-from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe
-
-
-display = Display()
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
-
- NAME = 'community.general.stackpath_compute'
-
- def __init__(self):
- super(InventoryModule, self).__init__()
-
- # credentials
- self.client_id = None
- self.client_secret = None
- self.stack_slug = None
- self.api_host = "https://gateway.stackpath.com"
- self.group_keys = [
- "stackSlug",
- "workloadId",
- "cityCode",
- "countryCode",
- "continent",
- "target",
- "name",
- "workloadSlug"
- ]
-
- def _validate_config(self, config):
- if config['plugin'] != 'community.general.stackpath_compute':
- raise AnsibleError("plugin doesn't match this plugin")
- try:
- client_id = config['client_id']
- if len(client_id) != 32:
- raise AnsibleError("client_id must be 32 characters long")
- except KeyError:
- raise AnsibleError("config missing client_id, a required option")
- try:
- client_secret = config['client_secret']
- if len(client_secret) != 64:
- raise AnsibleError("client_secret must be 64 characters long")
- except KeyError:
- raise AnsibleError("config missing client_id, a required option")
- return True
-
- def _set_credentials(self):
- '''
- :param config_data: contents of the inventory config file
- '''
- self.client_id = self.get_option('client_id')
- self.client_secret = self.get_option('client_secret')
-
- def _authenticate(self):
- payload = json.dumps(
- {
- "client_id": self.client_id,
- "client_secret": self.client_secret,
- "grant_type": "client_credentials",
- }
- )
- headers = {
- "Content-Type": "application/json",
- }
- resp = open_url(
- f"{self.api_host}/identity/v1/oauth2/token",
- headers=headers,
- data=payload,
- method="POST"
- )
- status_code = resp.code
- if status_code == 200:
- body = resp.read()
- self.auth_token = json.loads(body)["access_token"]
-
- def _query(self):
- results = []
- workloads = []
- self._authenticate()
- for stack_slug in self.stack_slugs:
- try:
- workloads = self._stackpath_query_get_list(f"{self.api_host}/workload/v1/stacks/{stack_slug}/workloads")
- except Exception:
- raise AnsibleError(f"Failed to get workloads from the StackPath API: {traceback.format_exc()}")
- for workload in workloads:
- try:
- workload_instances = self._stackpath_query_get_list(
- f"{self.api_host}/workload/v1/stacks/{stack_slug}/workloads/{workload['id']}/instances"
- )
- except Exception:
- raise AnsibleError(f"Failed to get workload instances from the StackPath API: {traceback.format_exc()}")
- for instance in workload_instances:
- if instance["phase"] == "RUNNING":
- instance["stackSlug"] = stack_slug
- instance["workloadId"] = workload["id"]
- instance["workloadSlug"] = workload["slug"]
- instance["cityCode"] = instance["location"]["cityCode"]
- instance["countryCode"] = instance["location"]["countryCode"]
- instance["continent"] = instance["location"]["continent"]
- instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"]
- try:
- if instance[self.hostname_key]:
- results.append(instance)
- except KeyError:
- pass
- return results
-
- def _populate(self, instances):
- for instance in instances:
- for group_key in self.group_keys:
- group = f"{group_key}_{instance[group_key]}"
- group = group.lower().replace(" ", "_").replace("-", "_")
- self.inventory.add_group(group)
- self.inventory.add_host(instance[self.hostname_key],
- group=group)
-
- def _stackpath_query_get_list(self, url):
- self._authenticate()
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {self.auth_token}",
- }
- next_page = True
- result = []
- cursor = '-1'
- while next_page:
- resp = open_url(
- f"{url}?page_request.first=10&page_request.after={cursor}",
- headers=headers,
- method="GET"
- )
- status_code = resp.code
- if status_code == 200:
- body = resp.read()
- body_json = json.loads(body)
- result.extend(body_json["results"])
- next_page = body_json["pageInfo"]["hasNextPage"]
- if next_page:
- cursor = body_json["pageInfo"]["endCursor"]
- return result
-
- def _get_stack_slugs(self, stacks):
- self.stack_slugs = [stack["slug"] for stack in stacks]
-
- def verify_file(self, path):
- '''
- :param loader: an ansible.parsing.dataloader.DataLoader object
- :param path: the path to the inventory config file
- :return the contents of the config file
- '''
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')):
- return True
- display.debug(
- "stackpath_compute inventory filename must end with \
- 'stackpath_compute.yml' or 'stackpath_compute.yaml'"
- )
- return False
-
- def parse(self, inventory, loader, path, cache=True):
-
- super(InventoryModule, self).parse(inventory, loader, path)
-
- config = self._read_config_data(path)
- self._validate_config(config)
- self._set_credentials()
-
- # get user specifications
- self.use_internal_ip = self.get_option('use_internal_ip')
- if self.use_internal_ip:
- self.hostname_key = "ipAddress"
- else:
- self.hostname_key = "externalIpAddress"
-
- self.stack_slugs = self.get_option('stack_slugs')
- if not self.stack_slugs:
- try:
- stacks = self._stackpath_query_get_list(f"{self.api_host}/stack/v1/stacks")
- self._get_stack_slugs(stacks)
- except Exception:
- raise AnsibleError(f"Failed to get stack IDs from the Stackpath API: {traceback.format_exc()}")
-
- cache_key = self.get_cache_key(path)
- # false when refresh_cache or --flush-cache is used
- if cache:
- # get the user-specified directive
- cache = self.get_option('cache')
-
- # Generate inventory
- cache_needs_update = False
- if cache:
- try:
- results = self._cache[cache_key]
- except KeyError:
- # if cache expires or cache file doesn't exist
- cache_needs_update = True
-
- if not cache or cache_needs_update:
- results = self._query()
-
- self._populate(make_unsafe(results))
-
- # If the cache has expired/doesn't exist or
- # if refresh_inventory/flush cache is used
- # when the user is using caching, update the cached inventory
- try:
- if cache_needs_update or (not cache and self.get_option('cache')):
- self._cache[cache_key] = results
- except Exception:
- raise AnsibleError(f"Failed to populate data: {traceback.format_exc()}")
diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py
index db2750f636..2eb52a617c 100644
--- a/plugins/inventory/virtualbox.py
+++ b/plugins/inventory/virtualbox.py
@@ -5,56 +5,57 @@
from __future__ import annotations
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: virtualbox
- short_description: virtualbox inventory source
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: virtualbox
+short_description: Virtualbox inventory source
+description:
+ - Get inventory hosts from the local virtualbox installation.
+ - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
+ - The inventory_hostname is always the 'Name' of the virtualbox instance.
+ - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimiter.
+ - A separate parameter, O(enable_advanced_group_parsing) is exposed to change grouping behaviour. See the parameter documentation
+ for details.
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+options:
+ plugin:
+ description: Token that ensures this is a source file for the P(community.general.virtualbox#inventory) plugin.
+ type: string
+ required: true
+ choices: ['virtualbox', 'community.general.virtualbox']
+ running_only:
+ description: Toggles showing all VMs instead of only those currently running.
+ type: boolean
+ default: false
+ settings_password_file:
+ description: Provide a file containing the settings password (equivalent to C(--settingspwfile)).
+ type: string
+ network_info_path:
+ description: Property path to query for network information (C(ansible_host)).
+ type: string
+ default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
+ query:
+ description: Create vars from virtualbox properties.
+ type: dictionary
+ default: {}
+ enable_advanced_group_parsing:
description:
- - Get inventory hosts from the local virtualbox installation.
- - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
- - The inventory_hostname is always the 'Name' of the virtualbox instance.
- - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimeter.
- - A separate parameter, O(enable_advanced_group_parsing) is exposed to change grouping behaviour. See the parameter documentation for details.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- plugin:
- description: token that ensures this is a source file for the 'virtualbox' plugin
- type: string
- required: true
- choices: ['virtualbox', 'community.general.virtualbox']
- running_only:
- description: toggles showing all vms vs only those currently running
- type: boolean
- default: false
- settings_password_file:
- description: provide a file containing the settings password (equivalent to --settingspwfile)
- type: string
- network_info_path:
- description: property path to query for network information (ansible_host)
- type: string
- default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
- query:
- description: create vars from virtualbox properties
- type: dictionary
- default: {}
- enable_advanced_group_parsing:
- description:
- - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based on the V(/) character and
- assign the resulting list elements as an Ansible Group.
- - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups according to
- U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups).
- Groups are now split using the V(,) character, and the V(/) character indicates nested groups.
- - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3") will result in
- the group C(TestGroup2) being a child group of C(TestGroup); and
- the VM being a part of C(TestGroup2) and C(TestGroup3).
- default: false
- type: bool
- version_added: 9.2.0
-'''
+ - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based
+ on the V(/) character and assign the resulting list elements as an Ansible Group.
+ - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups
+ according to U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups). Groups are now split using the V(,)
+ character, and the V(/) character indicates nested groups.
+ - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3")
+ results in the group C(TestGroup2) being a child group of C(TestGroup); and the VM being a part of C(TestGroup2)
+ and C(TestGroup3).
+ default: false
+ type: bool
+ version_added: 9.2.0
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
---
# file must be named vbox.yaml or vbox.yml
plugin: community.general.virtualbox
@@ -69,7 +70,7 @@ compose:
plugin: community.general.virtualbox
groups:
container: "'minis' in (inventory_hostname)"
-'''
+"""
import os
diff --git a/plugins/inventory/xen_orchestra.py b/plugins/inventory/xen_orchestra.py
index 4a6d431a7d..e6d828845a 100644
--- a/plugins/inventory/xen_orchestra.py
+++ b/plugins/inventory/xen_orchestra.py
@@ -5,76 +5,81 @@
from __future__ import annotations
-DOCUMENTATION = '''
- name: xen_orchestra
- short_description: Xen Orchestra inventory source
- version_added: 4.1.0
- author:
- - Dom Del Nano (@ddelnano)
- - Samori Gorse (@shinuza)
- requirements:
- - websocket-client >= 1.0.0
+DOCUMENTATION = r"""
+name: xen_orchestra
+short_description: Xen Orchestra inventory source
+version_added: 4.1.0
+author:
+ - Dom Del Nano (@ddelnano)
+ - Samori Gorse (@shinuza)
+requirements:
+ - websocket-client >= 1.0.0
+description:
+ - Get inventory hosts from a Xen Orchestra deployment.
+ - Uses a configuration file as an inventory source; it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).
+extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+options:
+ plugin:
+ description: The name of this plugin, it should always be set to V(community.general.xen_orchestra) for this plugin to
+ recognize it as its own.
+ required: true
+ choices: ['community.general.xen_orchestra']
+ type: str
+ api_host:
description:
- - Get inventory hosts from a Xen Orchestra deployment.
- - 'Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).'
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- plugin:
- description: The name of this plugin, it should always be set to V(community.general.xen_orchestra) for this plugin to recognize it as its own.
- required: true
- choices: ['community.general.xen_orchestra']
- type: str
- api_host:
- description:
- - API host to XOA API.
- - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_HOST) will be used instead.
- type: str
- env:
- - name: ANSIBLE_XO_HOST
- user:
- description:
- - Xen Orchestra user.
- - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_USER) will be used instead.
- required: true
- type: str
- env:
- - name: ANSIBLE_XO_USER
- password:
- description:
- - Xen Orchestra password.
- - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_PASSWORD) will be used instead.
- required: true
- type: str
- env:
- - name: ANSIBLE_XO_PASSWORD
- validate_certs:
- description: Verify TLS certificate if using HTTPS.
- type: boolean
- default: true
- use_ssl:
- description: Use wss when connecting to the Xen Orchestra API
- type: boolean
- default: true
- use_vm_uuid:
- description:
- - Import Xen VMs to inventory using their UUID as the VM entry name.
- - If set to V(false) use VM name labels instead of UUIDs.
- type: boolean
- default: true
- version_added: 10.4.0
- use_host_uuid:
- description:
- - Import Xen Hosts to inventory using their UUID as the Host entry name.
- - If set to V(false) use Host name labels instead of UUIDs.
- type: boolean
- default: true
- version_added: 10.4.0
-'''
+ - API host to XOA API.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_HOST)
+ is used instead.
+ type: str
+ env:
+ - name: ANSIBLE_XO_HOST
+ user:
+ description:
+ - Xen Orchestra user.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_USER)
+ is used instead.
+ required: true
+ type: str
+ env:
+ - name: ANSIBLE_XO_USER
+ password:
+ description:
+ - Xen Orchestra password.
+ - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_PASSWORD)
+ is used instead.
+ required: true
+ type: str
+ env:
+ - name: ANSIBLE_XO_PASSWORD
+ validate_certs:
+ description: Verify TLS certificate if using HTTPS.
+ type: boolean
+ default: true
+ use_ssl:
+ description: Use wss when connecting to the Xen Orchestra API.
+ type: boolean
+ default: true
+ use_vm_uuid:
+ description:
+ - Import Xen VMs to inventory using their UUID as the VM entry name.
+ - If set to V(false) use VM name labels instead of UUIDs.
+ type: boolean
+ default: true
+ version_added: 10.4.0
+ use_host_uuid:
+ description:
+ - Import Xen Hosts to inventory using their UUID as the Host entry name.
+ - If set to V(false) use Host name labels instead of UUIDs.
+ type: boolean
+ default: true
+ version_added: 10.4.0
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
+---
# file must be named xen_orchestra.yaml or xen_orchestra.yml
plugin: community.general.xen_orchestra
api_host: 192.168.1.255
@@ -83,13 +88,12 @@ password: xo_pwd
validate_certs: true
use_ssl: true
groups:
- kube_nodes: "'kube_node' in tags"
+ kube_nodes: "'kube_node' in tags"
compose:
- ansible_port: 2222
+ ansible_port: 2222
use_vm_uuid: false
use_host_uuid: true
-
-'''
+"""
import json
import ssl
@@ -220,7 +224,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
vm_name_list.append(vm['name_label'])
else:
vm_duplicate_count = vm_name_list.count(vm['name_label'])
- entry_name = vm['name_label'] + "_" + str(vm_duplicate_count)
+ entry_name = f"{vm['name_label']}_{vm_duplicate_count}"
vm_name_list.append(vm['name_label'])
else:
entry_name = uuid
@@ -280,7 +284,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
host_name_list.append(host['name_label'])
else:
host_duplicate_count = host_name_list.count(host['name_label'])
- entry_name = host['name_label'] + "_" + str(host_duplicate_count)
+ entry_name = f"{host['name_label']}_{host_duplicate_count}"
host_name_list.append(host['name_label'])
else:
entry_name = host['uuid']
diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py
index c41ab72ac3..7d65792b7f 100644
--- a/plugins/lookup/bitwarden.py
+++ b/plugins/lookup/bitwarden.py
@@ -5,66 +5,65 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = """
- name: bitwarden
- author:
- - Jonathan Lung (@lungj)
- requirements:
- - bw (command line utility)
- - be logged into bitwarden
- - bitwarden vault unlocked
- - E(BW_SESSION) environment variable set
- short_description: Retrieve secrets from Bitwarden
- version_added: 5.4.0
+DOCUMENTATION = r"""
+name: bitwarden
+author:
+ - Jonathan Lung (@lungj)
+requirements:
+ - bw (command line utility)
+ - be logged into bitwarden
+ - bitwarden vault unlocked
+ - E(BW_SESSION) environment variable set
+short_description: Retrieve secrets from Bitwarden
+version_added: 5.4.0
+description:
+ - Retrieve secrets from Bitwarden.
+options:
+ _terms:
+ description: Key(s) to fetch values for from login info.
+ required: true
+ type: list
+ elements: str
+ search:
description:
- - Retrieve secrets from Bitwarden.
- options:
- _terms:
- description: Key(s) to fetch values for from login info.
- required: true
- type: list
- elements: str
- search:
- description:
- - Field to retrieve, for example V(name) or V(id).
- - If set to V(id), only zero or one element can be returned.
- Use the Jinja C(first) filter to get the only list element.
- - If set to V(None) or V(''), or if O(_terms) is empty, records are not filtered by fields.
- type: str
- default: name
- version_added: 5.7.0
- field:
- description: Field to fetch. Leave unset to fetch whole response.
- type: str
- collection_id:
- description:
- - Collection ID to filter results by collection. Leave unset to skip filtering.
- - O(collection_id) and O(collection_name) are mutually exclusive.
- type: str
- version_added: 6.3.0
- collection_name:
- description:
- - Collection name to filter results by collection. Leave unset to skip filtering.
- - O(collection_id) and O(collection_name) are mutually exclusive.
- type: str
- version_added: 10.4.0
- organization_id:
- description: Organization ID to filter results by organization. Leave unset to skip filtering.
- type: str
- version_added: 8.5.0
- bw_session:
- description: Pass session key instead of reading from env.
- type: str
- version_added: 8.4.0
- result_count:
- description:
- - Number of results expected for the lookup query. Task will fail if O(result_count)
- is set but does not match the number of query results. Leave empty to skip this check.
- type: int
- version_added: 10.4.0
+ - Field to retrieve, for example V(name) or V(id).
+ - If set to V(id), only zero or one element can be returned. Use the Jinja C(first) filter to get the only list element.
+ - If set to V(None) or V(''), or if O(_terms) is empty, records are not filtered by fields.
+ type: str
+ default: name
+ version_added: 5.7.0
+ field:
+ description: Field to fetch. Leave unset to fetch whole response.
+ type: str
+ collection_id:
+ description:
+ - Collection ID to filter results by collection. Leave unset to skip filtering.
+ - O(collection_id) and O(collection_name) are mutually exclusive.
+ type: str
+ version_added: 6.3.0
+ collection_name:
+ description:
+ - Collection name to filter results by collection. Leave unset to skip filtering.
+ - O(collection_id) and O(collection_name) are mutually exclusive.
+ type: str
+ version_added: 10.4.0
+ organization_id:
+ description: Organization ID to filter results by organization. Leave unset to skip filtering.
+ type: str
+ version_added: 8.5.0
+ bw_session:
+ description: Pass session key instead of reading from env.
+ type: str
+ version_added: 8.4.0
+ result_count:
+ description:
+ - Number of results expected for the lookup query. Task fails if O(result_count) is set but does not match the number
+ of query results. Leave empty to skip this check.
+ type: int
+ version_added: 10.4.0
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: "Get 'password' from all Bitwarden records named 'a_test'"
ansible.builtin.debug:
msg: >-
@@ -111,14 +110,14 @@ EXAMPLES = """
{{ lookup('community.general.bitwarden', 'a_test', result_count=1) }}
"""
-RETURN = """
- _raw:
- description:
- - A one-element list that contains a list of requested fields or JSON objects of matches.
- - If you use C(query), you get a list of lists. If you use C(lookup) without C(wantlist=true),
- this always gets reduced to a list of field values or JSON objects.
- type: list
- elements: list
+RETURN = r"""
+_raw:
+ description:
+ - A one-element list that contains a list of requested fields or JSON objects of matches.
+ - If you use C(query), you get a list of lists. If you use C(lookup) without C(wantlist=true), this always gets reduced
+ to a list of field values or JSON objects.
+ type: list
+ elements: list
"""
from subprocess import Popen, PIPE
diff --git a/plugins/lookup/bitwarden_secrets_manager.py b/plugins/lookup/bitwarden_secrets_manager.py
index 3d08067105..431384c079 100644
--- a/plugins/lookup/bitwarden_secrets_manager.py
+++ b/plugins/lookup/bitwarden_secrets_manager.py
@@ -6,31 +6,31 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = """
- name: bitwarden_secrets_manager
- author:
- - jantari (@jantari)
- requirements:
- - bws (command line utility)
- short_description: Retrieve secrets from Bitwarden Secrets Manager
- version_added: 7.2.0
- description:
- - Retrieve secrets from Bitwarden Secrets Manager.
- options:
- _terms:
- description: Secret ID(s) to fetch values for.
- required: true
- type: list
- elements: str
- bws_access_token:
- description: The BWS access token to use for this lookup.
- env:
- - name: BWS_ACCESS_TOKEN
- required: true
- type: str
+DOCUMENTATION = r"""
+name: bitwarden_secrets_manager
+author:
+ - jantari (@jantari)
+requirements:
+ - bws (command line utility)
+short_description: Retrieve secrets from Bitwarden Secrets Manager
+version_added: 7.2.0
+description:
+ - Retrieve secrets from Bitwarden Secrets Manager.
+options:
+ _terms:
+ description: Secret ID(s) to fetch values for.
+ required: true
+ type: list
+ elements: str
+ bws_access_token:
+ description: The BWS access token to use for this lookup.
+ env:
+ - name: BWS_ACCESS_TOKEN
+ required: true
+ type: str
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Get a secret relying on the BWS_ACCESS_TOKEN environment variable for authentication
ansible.builtin.debug:
msg: >-
@@ -62,11 +62,11 @@ EXAMPLES = """
{{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972").value }}
"""
-RETURN = """
- _raw:
- description: List containing one or more secrets.
- type: list
- elements: dict
+RETURN = r"""
+_raw:
+ description: List containing one or more secrets.
+ type: list
+ elements: dict
"""
from subprocess import Popen, PIPE
diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py
index d63f3943b0..f2ad576907 100644
--- a/plugins/lookup/cartesian.py
+++ b/plugins/lookup/cartesian.py
@@ -6,24 +6,24 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: cartesian
- short_description: returns the cartesian product of lists
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: cartesian
+short_description: Returns the cartesian product of lists
+description:
+ - Takes the input lists and returns a list that represents the product of the input lists.
+ - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
+ - You can see the exact syntax in the examples section.
+options:
+ _terms:
description:
- - Takes the input lists and returns a list that represents the product of the input lists.
- - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
- You can see the exact syntax in the examples section.
- options:
- _terms:
- description:
- - a set of lists
- type: list
- elements: list
- required: true
-'''
+ - A set of lists.
+ type: list
+ elements: list
+ required: true
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Example of the change in the description
ansible.builtin.debug:
msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}"
@@ -34,15 +34,15 @@ EXAMPLES = """
with_community.general.cartesian:
- "{{list1}}"
- "{{list2}}"
- - [1,2,3,4,5,6]
+ - [1, 2, 3, 4, 5, 6]
"""
-RETURN = """
- _list:
- description:
- - list of lists composed of elements of the input lists
- type: list
- elements: list
+RETURN = r"""
+_list:
+ description:
+ - List of lists composed of elements of the input lists.
+ type: list
+ elements: list
"""
from itertools import product
@@ -66,13 +66,7 @@ class LookupModule(LookupBase):
"""
results = []
for x in terms:
- try:
- intermediate = listify_lookup_plugin_terms(x, templar=self._templar)
- except TypeError:
- # The loader argument is deprecated in ansible-core 2.14+. Fall back to
- # pre-2.14 behavior for older ansible-core versions.
- intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
- results.append(intermediate)
+ results.append(listify_lookup_plugin_terms(x, templar=self._templar))
return results
def run(self, terms, variables=None, **kwargs):
diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py
index eaa6a1aefa..8fe53744ee 100644
--- a/plugins/lookup/chef_databag.py
+++ b/plugins/lookup/chef_databag.py
@@ -6,42 +6,41 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: chef_databag
- short_description: fetches data from a Chef Databag
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: chef_databag
+short_description: Fetches data from a Chef Databag
+description:
+ - 'This is a lookup plugin to provide access to chef data bags using the pychef package. It interfaces with the chef server
+ API using the same methods to find a knife or chef-client config file to load parameters from, starting from either the
+ given base path or the current working directory. The lookup order mirrors the one from Chef, all folders in the base
+ path are walked back looking for the following configuration file in order: C(.chef/knife.rb), C(~/.chef/knife.rb), C(/etc/chef/client.rb).'
+requirements:
+ - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))"
+options:
+ name:
description:
- - "This is a lookup plugin to provide access to chef data bags using the pychef package.
- It interfaces with the chef server api using the same methods to find a knife or chef-client config file to load parameters from,
- starting from either the given base path or the current working directory.
- The lookup order mirrors the one from Chef, all folders in the base path are walked back looking for the following configuration
- file in order : .chef/knife.rb, ~/.chef/knife.rb, /etc/chef/client.rb"
- requirements:
- - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))"
- options:
- name:
- description:
- - Name of the databag
- type: string
- required: true
- item:
- description:
- - Item to fetch
- type: string
- required: true
-'''
-
-EXAMPLES = """
- - ansible.builtin.debug:
- msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}"
+ - Name of the databag.
+ type: string
+ required: true
+ item:
+ description:
+ - Item to fetch.
+ type: string
+ required: true
"""
-RETURN = """
- _raw:
- description:
- - The value from the databag.
- type: list
- elements: dict
+EXAMPLES = r"""
+- ansible.builtin.debug:
+ msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}"
+"""
+
+RETURN = r"""
+_raw:
+ description:
+ - The value from the databag.
+ type: list
+ elements: dict
"""
from ansible.errors import AnsibleError
diff --git a/plugins/lookup/collection_version.py b/plugins/lookup/collection_version.py
index 28a9c34420..142c516df5 100644
--- a/plugins/lookup/collection_version.py
+++ b/plugins/lookup/collection_version.py
@@ -5,18 +5,17 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
name: collection_version
author: Felix Fontein (@felixfontein)
version_added: "4.0.0"
short_description: Retrieves the version of an installed collection
description:
- - This lookup allows to query the version of an installed collection, and to determine whether a
- collection is installed at all.
- - By default it returns V(none) for non-existing collections and V(*) for collections without a
- version number. The latter should only happen in development environments, or when installing
- a collection from git which has no version in its C(galaxy.yml). This behavior can be adjusted
- by providing other values with O(result_not_found) and O(result_no_version).
+ - This lookup allows to query the version of an installed collection, and to determine whether a collection is installed
+ at all.
+ - By default it returns V(none) for non-existing collections and V(*) for collections without a version number. The latter
+ should only happen in development environments, or when installing a collection from git which has no version in its C(galaxy.yml).
+ This behavior can be adjusted by providing other values with O(result_not_found) and O(result_no_version).
options:
_terms:
description:
@@ -34,30 +33,27 @@ options:
result_no_version:
description:
- The value to return when the collection has no version number.
- - This can happen for collections installed from git which do not have a version number
- in C(galaxy.yml).
+ - This can happen for collections installed from git which do not have a version number in C(galaxy.yml).
- By default, V(*) is returned.
type: string
default: '*'
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Check version of community.general
ansible.builtin.debug:
msg: "community.general version {{ lookup('community.general.collection_version', 'community.general') }}"
"""
-RETURN = """
- _raw:
- description:
- - The version number of the collections listed as input.
- - If a collection can not be found, it will return the value provided in O(result_not_found).
- By default, this is V(none).
- - If a collection can be found, but the version not identified, it will return the value provided in
- O(result_no_version). By default, this is V(*). This can happen for collections installed
- from git which do not have a version number in V(galaxy.yml).
- type: list
- elements: str
+RETURN = r"""
+_raw:
+ description:
+ - The version number of the collections listed as input.
+ - If a collection can not be found, it returns the value provided in O(result_not_found). By default, this is V(none).
+ - If a collection can be found, but the version not identified, it returns the value provided in O(result_no_version).
+ By default, this is V(*). This can happen for collections installed from git which do not have a version number in V(galaxy.yml).
+ type: list
+ elements: str
"""
import json
diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py
index cf7226d579..f57b3da891 100644
--- a/plugins/lookup/consul_kv.py
+++ b/plugins/lookup/consul_kv.py
@@ -7,109 +7,109 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: consul_kv
- short_description: Fetch metadata from a Consul key value store.
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: consul_kv
+short_description: Fetch metadata from a Consul key value store
+description:
+ - Lookup metadata for a playbook from the key value store in a Consul cluster. Values can be easily set in the kv store
+ with simple REST commands.
+ - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata).
+requirements:
+ - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
+options:
+ _raw:
+ description: List of key(s) to retrieve.
+ type: list
+ elements: string
+ recurse:
+ type: boolean
+ description: If V(true), retrieves all the values that have the given key as prefix.
+ default: false
+ index:
description:
- - Lookup metadata for a playbook from the key value store in a Consul cluster.
- Values can be easily set in the kv store with simple rest commands
- - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata)
- requirements:
- - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
- options:
- _raw:
- description: List of key(s) to retrieve.
- type: list
- elements: string
- recurse:
- type: boolean
- description: If true, will retrieve all the values that have the given key as prefix.
- default: false
- index:
- description:
- - If the key has a value with the specified index then this is returned allowing access to historical values.
- type: int
- datacenter:
- description:
- - Retrieve the key from a consul datacenter other than the default for the consul host.
- type: str
- token:
- description: The acl token to allow access to restricted values.
- type: str
- host:
- default: localhost
- type: str
- description:
- - The target to connect to, must be a resolvable address.
- - Will be determined from E(ANSIBLE_CONSUL_URL) if that is set.
- ini:
- - section: lookup_consul
- key: host
- port:
- description:
- - The port of the target host to connect to.
- - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there.
- type: int
- default: 8500
- scheme:
- default: http
- type: str
- description:
- - Whether to use http or https.
- - If you use E(ANSIBLE_CONSUL_URL) this value will be used from there.
- validate_certs:
- default: true
- description: Whether to verify the TLS connection or not.
- type: bool
- env:
- - name: ANSIBLE_CONSUL_VALIDATE_CERTS
- ini:
- - section: lookup_consul
- key: validate_certs
- client_cert:
- description: The client cert to verify the TLS connection.
- type: str
- env:
- - name: ANSIBLE_CONSUL_CLIENT_CERT
- ini:
- - section: lookup_consul
- key: client_cert
- url:
- description:
- - The target to connect to.
- - "Should look like this: V(https://my.consul.server:8500)."
- type: str
- version_added: 1.0.0
- env:
- - name: ANSIBLE_CONSUL_URL
- ini:
- - section: lookup_consul
- key: url
-'''
-
-EXAMPLES = """
- - ansible.builtin.debug:
- msg: 'key contains {{item}}'
- with_community.general.consul_kv:
- - 'key/to/retrieve'
-
- - name: Parameters can be provided after the key be more specific about what to retrieve
- ansible.builtin.debug:
- msg: 'key contains {{item}}'
- with_community.general.consul_kv:
- - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
-
- - name: retrieving a KV from a remote cluster on non default port
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}"
+ - If the key has a value with the specified index then this is returned allowing access to historical values.
+ type: int
+ datacenter:
+ description:
+ - Retrieve the key from a consul datacenter other than the default for the consul host.
+ type: str
+ token:
+ description: The acl token to allow access to restricted values.
+ type: str
+ host:
+ default: localhost
+ type: str
+ description:
+ - The target to connect to, must be a resolvable address.
+ - It is determined from E(ANSIBLE_CONSUL_URL) if that is set.
+ ini:
+ - section: lookup_consul
+ key: host
+ port:
+ description:
+ - The port of the target host to connect to.
+ - If you use E(ANSIBLE_CONSUL_URL) this value is used from there.
+ type: int
+ default: 8500
+ scheme:
+ default: http
+ type: str
+ description:
+ - Whether to use http or https.
+ - If you use E(ANSIBLE_CONSUL_URL) this value is used from there.
+ validate_certs:
+ default: true
+ description: Whether to verify the TLS connection or not.
+ type: bool
+ env:
+ - name: ANSIBLE_CONSUL_VALIDATE_CERTS
+ ini:
+ - section: lookup_consul
+ key: validate_certs
+ client_cert:
+ description: The client cert to verify the TLS connection.
+ type: str
+ env:
+ - name: ANSIBLE_CONSUL_CLIENT_CERT
+ ini:
+ - section: lookup_consul
+ key: client_cert
+ url:
+ description:
+ - The target to connect to.
+ - 'Should look like this: V(https://my.consul.server:8500).'
+ type: str
+ version_added: 1.0.0
+ env:
+ - name: ANSIBLE_CONSUL_URL
+ ini:
+ - section: lookup_consul
+ key: url
"""
-RETURN = """
- _raw:
- description:
- - Value(s) stored in consul.
- type: dict
+EXAMPLES = r"""
+- ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to/retrieve'
+
+- name: Parameters can be provided after the key be more specific about what to retrieve
+ ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
+
+- name: Retrieving a KV from a remote cluster on a non-default port
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}"
+"""
+
+RETURN = r"""
+_raw:
+ description:
+ - Value(s) stored in consul.
+ type: dict
"""
from ansible.module_utils.six.moves.urllib.parse import urlparse
diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py
index 0700a5ddcb..a170b13d03 100644
--- a/plugins/lookup/credstash.py
+++ b/plugins/lookup/credstash.py
@@ -6,54 +6,54 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: credstash
- short_description: retrieve secrets from Credstash on AWS
- requirements:
- - credstash (python library)
- description:
- - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash"
- options:
- _terms:
- description: term or list of terms to lookup in the credit store
- type: list
- elements: string
- required: true
- table:
- description: name of the credstash table to query
- type: str
- default: 'credential-store'
- version:
- description: Credstash version
- type: str
- default: ''
- region:
- description: AWS region
- type: str
- profile_name:
- description: AWS profile to use for authentication
- type: str
- env:
- - name: AWS_PROFILE
- aws_access_key_id:
- description: AWS access key ID
- type: str
- env:
- - name: AWS_ACCESS_KEY_ID
- aws_secret_access_key:
- description: AWS access key
- type: str
- env:
- - name: AWS_SECRET_ACCESS_KEY
- aws_session_token:
- description: AWS session token
- type: str
- env:
- - name: AWS_SESSION_TOKEN
-'''
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: credstash
+short_description: Retrieve secrets from Credstash on AWS
+requirements:
+ - credstash (python library)
+description:
+ - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash."
+options:
+ _terms:
+ description: Term or list of terms to lookup in the credit store.
+ type: list
+ elements: string
+ required: true
+ table:
+ description: Name of the credstash table to query.
+ type: str
+ default: 'credential-store'
+ version:
+ description: Credstash version.
+ type: str
+ default: ''
+ region:
+ description: AWS region.
+ type: str
+ profile_name:
+ description: AWS profile to use for authentication.
+ type: str
+ env:
+ - name: AWS_PROFILE
+ aws_access_key_id:
+ description: AWS access key ID.
+ type: str
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ aws_secret_access_key:
+ description: AWS access key.
+ type: str
+ env:
+ - name: AWS_SECRET_ACCESS_KEY
+ aws_session_token:
+ description: AWS session token.
+ type: str
+ env:
+ - name: AWS_SESSION_TOKEN
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: first use credstash to store your secrets
ansible.builtin.shell: credstash put my-github-password secure123
@@ -77,20 +77,20 @@ EXAMPLES = """
environment: production
tasks:
- - name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}"
+ - name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}"
- - name: "Test credstash lookup plugin -- get the password with a context defined here"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
+ - name: "Test credstash lookup plugin -- get the password with a context defined here"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
"""
-RETURN = """
- _raw:
- description:
- - Value(s) stored in Credstash.
- type: str
+RETURN = r"""
+_raw:
+ description:
+ - Value(s) stored in Credstash.
+ type: str
"""
from ansible.errors import AnsibleError
diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py
index 4ed040dc6d..63834dce9b 100644
--- a/plugins/lookup/cyberarkpassword.py
+++ b/plugins/lookup/cyberarkpassword.py
@@ -6,62 +6,64 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: cyberarkpassword
- short_description: get secrets from CyberArk AIM
- requirements:
- - CyberArk AIM tool installed
+DOCUMENTATION = r"""
+author: Unknown (!UNKNOWN)
+name: cyberarkpassword
+short_description: Get secrets from CyberArk AIM
+requirements:
+ - CyberArk AIM tool installed
+description:
+ - Get secrets from CyberArk AIM.
+options:
+ _command:
+ description: Cyberark CLI utility.
+ type: string
+ env:
+ - name: AIM_CLIPASSWORDSDK_CMD
+ default: '/opt/CARKaim/sdk/clipasswordsdk'
+ appid:
+ description: Defines the unique ID of the application that is issuing the password request.
+ type: string
+ required: true
+ query:
+ description: Describes the filter criteria for the password retrieval.
+ type: string
+ required: true
+ output:
description:
- - Get secrets from CyberArk AIM.
- options :
- _command:
- description: Cyberark CLI utility.
- type: string
- env:
- - name: AIM_CLIPASSWORDSDK_CMD
- default: '/opt/CARKaim/sdk/clipasswordsdk'
- appid:
- description: Defines the unique ID of the application that is issuing the password request.
- type: string
- required: true
- query:
- description: Describes the filter criteria for the password retrieval.
- type: string
- required: true
- output:
- description:
- - Specifies the desired output fields separated by commas.
- - "They could be: Password, PassProps., PasswordChangeInProcess"
- type: string
- default: 'password'
- _extra:
- description: for extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide"
- notes:
- - For Ansible on Windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe.
-'''
-
-EXAMPLES = """
- - name: passing options to the lookup
- ansible.builtin.debug:
- msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}'
- vars:
- cyquery:
- appid: "app_ansible"
- query: "safe=CyberArk_Passwords;folder=root;object=AdminPass"
- output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess"
-
-
- - name: used in a loop
- ansible.builtin.debug:
- msg: "{{item}}"
- with_community.general.cyberarkpassword:
- appid: 'app_ansible'
- query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
- output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess'
+ - Specifies the desired output fields separated by commas.
+ - 'They could be: Password, PassProps., PasswordChangeInProcess.'
+ type: string
+ default: 'password'
+ _extra:
+ description: For extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and
+ ASCP Implementation Guide".
+notes:
+ - For Ansible on Windows, please change the -parameters (C(-p), C(-d), and C(-o)) to /parameters (C(/p), C(/d), and C(/o))
+ and change the location of C(CLIPasswordSDK.exe).
"""
-RETURN = """
+EXAMPLES = r"""
+- name: passing options to the lookup
+ ansible.builtin.debug:
+ msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}'
+ vars:
+ cyquery:
+ appid: "app_ansible"
+ query: "safe=CyberArk_Passwords;folder=root;object=AdminPass"
+ output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess"
+
+
+- name: used in a loop
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.cyberarkpassword:
+ appid: 'app_ansible'
+ query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
+ output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess'
+"""
+
+RETURN = r"""
_result:
description: A list containing one dictionary.
type: list
@@ -69,12 +71,12 @@ _result:
contains:
password:
description:
- - The actual value stored
+ - The actual value stored.
passprops:
- description: properties assigned to the entry
+ description: Properties assigned to the entry.
type: dictionary
passwordchangeinprocess:
- description: did the password change?
+ description: Did the password change?
"""
import os
diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py
index 2b7f293872..18d2a000d9 100644
--- a/plugins/lookup/dependent.py
+++ b/plugins/lookup/dependent.py
@@ -6,31 +6,30 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r"""
name: dependent
short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables
author: Felix Fontein (@felixfontein)
version_added: 3.1.0
description:
- - "Takes the input lists and returns a list with elements that are lists, dictionaries,
- or template expressions which evaluate to lists or dicts, composed of the elements of
- the input evaluated lists and dictionaries."
+ - Takes the input lists and returns a list with elements that are lists, dictionaries, or template expressions which evaluate
+ to lists or dicts, composed of the elements of the input evaluated lists and dictionaries.
options:
_terms:
description:
- - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary.
- The name is the index that is used in the result object. The value is iterated over as described below.
+ - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. The name
+ is the index that is used in the result object. The value is iterated over as described below.
- If the value is a list, it is simply iterated over.
- - If the value is a dictionary, it is iterated over and returned as if they would be processed by the
- P(ansible.builtin.dict2items#filter) filter.
- - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen
- elements with C(item.). The result must be a list or a dictionary.
+ - If the value is a dictionary, it is iterated over and returned as if they would be processed by the P(ansible.builtin.dict2items#filter)
+ filter.
+ - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen elements with
+ C(item.). The result must be a list or a dictionary.
type: list
elements: dict
required: true
"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Install/remove public keys for active admin users
ansible.posix.authorized_key:
user: "{{ item.admin.key }}"
@@ -76,9 +75,9 @@ EXAMPLES = """
loop_control:
# Makes the output readable, so that it doesn't contain the whole subdictionaries and lists
label: |-
- {{ [item.zone.key, item.prefix.key, item.entry.key,
- item.entry.value.ttl | default(3600),
- item.entry.value.absent | default(False), item.entry.value.value] }}
+ {{ [item.zone.key, item.prefix.key, item.entry.key,
+ item.entry.value.ttl | default(3600),
+ item.entry.value.absent | default(False), item.entry.value.value] }}
with_community.general.dependent:
- zone: dns_setup
- prefix: item.zone.value
@@ -89,47 +88,44 @@ EXAMPLES = """
'':
A:
value:
- - 1.2.3.4
+ - 1.2.3.4
AAAA:
value:
- - "2a01:1:2:3::1"
+ - "2a01:1:2:3::1"
'test._domainkey':
TXT:
ttl: 300
value:
- - '"k=rsa; t=s; p=MIGfMA..."'
+ - '"k=rsa; t=s; p=MIGfMA..."'
example.org:
'www':
A:
value:
- - 1.2.3.4
- - 5.6.7.8
+ - 1.2.3.4
+ - 5.6.7.8
"""
-RETURN = """
- _list:
- description:
- - A list composed of dictionaries whose keys are the variable names from the input list.
- type: list
- elements: dict
- sample:
- - key1: a
- key2: test
- - key1: a
- key2: foo
- - key1: b
- key2: bar
+RETURN = r"""
+_list:
+ description:
+ - A list composed of dictionaries whose keys are the variable names from the input list.
+ type: list
+ elements: dict
+ sample:
+ - key1: a
+ key2: test
+ - key1: a
+ key2: foo
+ - key1: b
+ key2: bar
"""
from ansible.errors import AnsibleLookupError
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
-from ansible.release import __version__ as ansible_version
from ansible.template import Templar
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
try:
from ansible.template import trust_as_template as _trust_as_template
HAS_DATATAGGING = True
@@ -137,11 +133,6 @@ except ImportError:
HAS_DATATAGGING = False
-# Whether Templar has a cache, which can be controlled by Templar.template()'s cache option.
-# The cache was removed for ansible-core 2.14 (https://github.com/ansible/ansible/pull/78419)
-_TEMPLAR_HAS_TEMPLATE_CACHE = LooseVersion(ansible_version) < LooseVersion('2.14.0')
-
-
def _make_safe(value):
if HAS_DATATAGGING and isinstance(value, str):
return _trust_as_template(value)
@@ -157,8 +148,6 @@ class LookupModule(LookupBase):
"""
templar.available_variables = variables or {}
quoted_expression = "{0}{1}{2}".format("{{", expression, "}}")
- if _TEMPLAR_HAS_TEMPLATE_CACHE:
- return templar.template(quoted_expression, cache=False)
if hasattr(templar, 'evaluate_expression'):
# This is available since the Data Tagging PR has been merged
return templar.evaluate_expression(_make_safe(expression))
@@ -208,7 +197,10 @@ class LookupModule(LookupBase):
result = []
if len(terms) > 0:
- templar = Templar(loader=self._templar._loader)
+ if HAS_DATATAGGING:
+ templar = self._templar.copy_with_new_env(available_variables={})
+ else:
+ templar = Templar(loader=self._templar._loader)
data = []
vars_so_far = set()
for index, term in enumerate(terms):
diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py
index cbb597b7b5..07fc287d71 100644
--- a/plugins/lookup/dig.py
+++ b/plugins/lookup/dig.py
@@ -6,89 +6,113 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: dig
- author: Jan-Piet Mens (@jpmens)
- short_description: query DNS using the dnspython library
- requirements:
- - dnspython (python library, http://www.dnspython.org/)
+DOCUMENTATION = r"""
+name: dig
+author: Jan-Piet Mens (@jpmens)
+short_description: Query DNS using the dnspython library
+requirements:
+ - dnspython (python library, http://www.dnspython.org/)
+description:
+ - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain
+ name). It is possible to lookup any DNS record in this manner.
+ - There are a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name.
+ It is also possible to explicitly specify the DNS server(s) to use for lookups.
+ - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with the FQDN.
+ - In addition to (default) A record, it is also possible to specify a different record type that should be queried. This
+ can be done by either passing in an additional parameter of the form qtype=TYPE to the dig lookup, or by appending /TYPE to
+ the FQDN being queried.
+ - If multiple values are associated with the requested record, the results are returned as a comma-separated list. In
+ such cases you may want to pass option C(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup),
+ which results in the record values being returned as a list over which you can iterate later on.
+ - By default, the lookup relies on system-wide configured DNS servers for performing the query. It is also possible to
+ explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. This needs to
+ be passed in as an additional parameter to the lookup.
+options:
+ _terms:
+ description: Domain(s) to query.
+ type: list
+ elements: str
+ qtype:
description:
- - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name).
- It is possible to lookup any DNS record in this manner.
- - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name.
- It is also possible to explicitly specify the DNS server(s) to use for lookups.
- - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN
- - In addition to (default) A record, it is also possible to specify a different record type that should be queried.
- This can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried.
- - If multiple values are associated with the requested record, the results will be returned as a comma-separated list.
- In such cases you may want to pass option C(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup),
- which will result in the record values being returned as a list over which you can iterate later on.
- - By default, the lookup will rely on system-wide configured DNS servers for performing the query.
- It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation.
- This needs to be passed-in as an additional parameter to the lookup
- options:
- _terms:
- description: Domain(s) to query.
- type: list
- elements: str
- qtype:
- description:
- - Record type to query.
- - V(DLV) has been removed in community.general 6.0.0.
- - V(CAA) has been added in community.general 6.3.0.
- type: str
- default: 'A'
- choices: [A, ALL, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
- flat:
- description: If 0 each record is returned as a dictionary, otherwise a string.
- type: int
- default: 1
- retry_servfail:
- description: Retry a nameserver if it returns SERVFAIL.
- default: false
- type: bool
- version_added: 3.6.0
- fail_on_error:
- description:
- - Abort execution on lookup errors.
- - The default for this option will likely change to V(true) in the future.
- The current default, V(false), is used for backwards compatibility, and will result in empty strings
- or the string V(NXDOMAIN) in the result in case of errors.
- default: false
- type: bool
- version_added: 5.4.0
- real_empty:
- description:
- - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN).
- - The default for this option will likely change to V(true) in the future.
- - This option will be forced to V(true) if multiple domains to be queried are specified.
- default: false
- type: bool
- version_added: 6.0.0
- class:
- description:
- - "Class."
- type: str
- default: 'IN'
- tcp:
- description: Use TCP to lookup DNS records.
- default: false
- type: bool
- version_added: 7.5.0
- port:
- description: Use port as target port when looking up DNS records.
- default: 53
- type: int
- version_added: 9.5.0
- notes:
- - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary.
- - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary.
- - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly.
- Syntax for specifying the record type is shown in the examples below.
- - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake.
-'''
+ - Record type to query.
+ - V(DLV) has been removed in community.general 6.0.0.
+ - V(CAA) has been added in community.general 6.3.0.
+ type: str
+ default: 'A'
+ choices:
+ - A
+ - ALL
+ - AAAA
+ - CAA
+ - CNAME
+ - DNAME
+ - DNSKEY
+ - DS
+ - HINFO
+ - LOC
+ - MX
+ - NAPTR
+ - NS
+ - NSEC3PARAM
+ - PTR
+ - RP
+ - RRSIG
+ - SOA
+ - SPF
+ - SRV
+ - SSHFP
+ - TLSA
+ - TXT
+ flat:
+ description: If 0 each record is returned as a dictionary, otherwise a string.
+ type: int
+ default: 1
+ retry_servfail:
+ description: Retry a nameserver if it returns SERVFAIL.
+ default: false
+ type: bool
+ version_added: 3.6.0
+ fail_on_error:
+ description:
+ - Abort execution on lookup errors.
+ - The default for this option is likely to change to V(true) in the future. The current default, V(false), is used for
+ backwards compatibility, and results in empty strings or the string V(NXDOMAIN) in the result in case of errors.
+ default: false
+ type: bool
+ version_added: 5.4.0
+ real_empty:
+ description:
+ - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN).
+ - The default for this option is likely to change to V(true) in the future.
+ - This option is forced to V(true) if multiple domains to be queried are specified.
+ default: false
+ type: bool
+ version_added: 6.0.0
+ class:
+ description:
+ - Class.
+ type: str
+ default: 'IN'
+ tcp:
+ description: Use TCP to lookup DNS records.
+ default: false
+ type: bool
+ version_added: 7.5.0
+ port:
+ description: Use port as target port when looking up DNS records.
+ default: 53
+ type: int
+ version_added: 9.5.0
+notes:
+ - V(ALL) is not a record in itself, merely the listed fields are available for any record results you retrieve in the form
+ of a dictionary.
+ - While the plugin supports anything which C(dnspython) supports out of the box, only a subset can be converted into a dictionary.
+ - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly. Syntax for specifying
+ the record type is shown in the examples below.
+ - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake.
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: Simple A record (IPV4 address) lookup for example.com
ansible.builtin.debug:
msg: "{{ lookup('community.general.dig', 'example.com.')}}"
@@ -139,83 +163,83 @@ EXAMPLES = """
msg: "{{ lookup('community.general.dig', 'example.org./A', retry_servfail=true) }}"
"""
-RETURN = """
- _list:
- description:
- - List of composed strings or dictionaries with key and value
- If a dictionary, fields shows the keys returned depending on query type
- type: list
- elements: raw
- contains:
- ALL:
- description:
- - owner, ttl, type
- A:
- description:
- - address
- AAAA:
- description:
- - address
- CAA:
- description:
- - flags
- - tag
- - value
- version_added: 6.3.0
- CNAME:
- description:
- - target
- DNAME:
- description:
- - target
- DNSKEY:
- description:
- - flags, algorithm, protocol, key
- DS:
- description:
- - algorithm, digest_type, key_tag, digest
- HINFO:
- description:
- - cpu, os
- LOC:
- description:
- - latitude, longitude, altitude, size, horizontal_precision, vertical_precision
- MX:
- description:
- - preference, exchange
- NAPTR:
- description:
- - order, preference, flags, service, regexp, replacement
- NS:
- description:
- - target
- NSEC3PARAM:
- description:
- - algorithm, flags, iterations, salt
- PTR:
- description:
- - target
- RP:
- description:
- - mbox, txt
- SOA:
- description:
- - mname, rname, serial, refresh, retry, expire, minimum
- SPF:
- description:
- - strings
- SRV:
- description:
- - priority, weight, port, target
- SSHFP:
- description:
- - algorithm, fp_type, fingerprint
- TLSA:
- description:
- - usage, selector, mtype, cert
- TXT:
- description:
- - strings
+RETURN = r"""
+_list:
+ description:
+ - List of composed strings or of dictionaries, with fields depending
+ on query type.
+ type: list
+ elements: raw
+ contains:
+ ALL:
+ description:
+ - C(owner), C(ttl), C(type).
+ A:
+ description:
+ - C(address).
+ AAAA:
+ description:
+ - C(address).
+ CAA:
+ description:
+ - C(flags).
+ - C(tag).
+ - C(value).
+ version_added: 6.3.0
+ CNAME:
+ description:
+ - C(target).
+ DNAME:
+ description:
+ - C(target).
+ DNSKEY:
+ description:
+ - C(flags), C(algorithm), C(protocol), C(key).
+ DS:
+ description:
+ - C(algorithm), C(digest_type), C(key_tag), C(digest).
+ HINFO:
+ description:
+ - C(cpu), C(os).
+ LOC:
+ description:
+ - C(latitude), C(longitude), C(altitude), C(size), C(horizontal_precision), C(vertical_precision).
+ MX:
+ description:
+ - C(preference), C(exchange).
+ NAPTR:
+ description:
+ - C(order), C(preference), C(flags), C(service), C(regexp), C(replacement).
+ NS:
+ description:
+ - C(target).
+ NSEC3PARAM:
+ description:
+ - C(algorithm), C(flags), C(iterations), C(salt).
+ PTR:
+ description:
+ - C(target).
+ RP:
+ description:
+ - C(mbox), C(txt).
+ SOA:
+ description:
+ - C(mname), C(rname), C(serial), C(refresh), C(retry), C(expire), C(minimum).
+ SPF:
+ description:
+ - C(strings).
+ SRV:
+ description:
+ - C(priority), C(weight), C(port), C(target).
+ SSHFP:
+ description:
+ - C(algorithm), C(fp_type), C(fingerprint).
+ TLSA:
+ description:
+ - C(usage), C(selector), C(mtype), C(cert).
+ TXT:
+ description:
+ - C(strings).
"""
from ansible.errors import AnsibleError
diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py
index baaa63aa98..fb0a5d5138 100644
--- a/plugins/lookup/dnstxt.py
+++ b/plugins/lookup/dnstxt.py
@@ -6,30 +6,30 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: dnstxt
- author: Jan-Piet Mens (@jpmens)
- short_description: query a domain(s)'s DNS txt fields
- requirements:
- - dns/dns.resolver (python library)
+DOCUMENTATION = r"""
+name: dnstxt
+author: Jan-Piet Mens (@jpmens)
+short_description: Query the DNS TXT fields of one or more domains
+requirements:
+ - dns/dns.resolver (python library)
+description:
+ - Uses a python library to return the DNS TXT record for a domain.
+options:
+ _terms:
+ description: Domain or list of domains to query TXT records from.
+ required: true
+ type: list
+ elements: string
+ real_empty:
description:
- - Uses a python library to return the DNS TXT record for a domain.
- options:
- _terms:
- description: domain or list of domains to query TXT records from
- required: true
- type: list
- elements: string
- real_empty:
- description:
- - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN).
- - The default for this option will likely change to V(true) in the future.
- default: false
- type: bool
- version_added: 6.0.0
-'''
+ - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN).
+ - The default for this option is likely to change to V(true) in the future.
+ default: false
+ type: bool
+ version_added: 6.0.0
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: show txt entry
ansible.builtin.debug:
msg: "{{lookup('community.general.dnstxt', ['test.example.com'])}}"
@@ -48,11 +48,11 @@ EXAMPLES = """
with_community.general.dnstxt: "{{lookup('community.general.dnstxt', ['test.example.com']).split(',')}}"
"""
-RETURN = """
- _list:
- description:
- - values returned by the DNS TXT record.
- type: list
+RETURN = r"""
+_list:
+ description:
+ - Values returned by the DNS TXT record.
+ type: list
"""
HAVE_DNS = False
diff --git a/plugins/lookup/dsv.py b/plugins/lookup/dsv.py
index eba3e36368..0b34b3ce31 100644
--- a/plugins/lookup/dsv.py
+++ b/plugins/lookup/dsv.py
@@ -12,81 +12,78 @@ author: Adam Migus (@amigus)
short_description: Get secrets from Thycotic DevOps Secrets Vault
version_added: 1.0.0
description:
- - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a
- DSV O(tenant) using a O(client_id) and O(client_secret).
+ - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a DSV O(tenant) using a O(client_id) and O(client_secret).
requirements:
- - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/
+ - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/
options:
- _terms:
- description: The path to the secret, for example V(/staging/servers/web1).
- required: true
- tenant:
- description: The first format parameter in the default O(url_template).
- type: string
- env:
- - name: DSV_TENANT
- ini:
- - section: dsv_lookup
- key: tenant
- required: true
- tld:
- default: com
- description: The top-level domain of the tenant; the second format
- parameter in the default O(url_template).
- type: string
- env:
- - name: DSV_TLD
- ini:
- - section: dsv_lookup
- key: tld
- required: false
- client_id:
- description: The client_id with which to request the Access Grant.
- type: string
- env:
- - name: DSV_CLIENT_ID
- ini:
- - section: dsv_lookup
- key: client_id
- required: true
- client_secret:
- description: The client secret associated with the specific O(client_id).
- type: string
- env:
- - name: DSV_CLIENT_SECRET
- ini:
- - section: dsv_lookup
- key: client_secret
- required: true
- url_template:
- default: https://{}.secretsvaultcloud.{}/v1
- description: The path to prepend to the base URL to form a valid REST
- API request.
- type: string
- env:
- - name: DSV_URL_TEMPLATE
- ini:
- - section: dsv_lookup
- key: url_template
- required: false
+ _terms:
+ description: The path to the secret, for example V(/staging/servers/web1).
+ required: true
+ tenant:
+ description: The first format parameter in the default O(url_template).
+ type: string
+ env:
+ - name: DSV_TENANT
+ ini:
+ - section: dsv_lookup
+ key: tenant
+ required: true
+ tld:
+ default: com
+ description: The top-level domain of the tenant; the second format parameter in the default O(url_template).
+ type: string
+ env:
+ - name: DSV_TLD
+ ini:
+ - section: dsv_lookup
+ key: tld
+ required: false
+ client_id:
+ description: The client_id with which to request the Access Grant.
+ type: string
+ env:
+ - name: DSV_CLIENT_ID
+ ini:
+ - section: dsv_lookup
+ key: client_id
+ required: true
+ client_secret:
+ description: The client secret associated with the specific O(client_id).
+ type: string
+ env:
+ - name: DSV_CLIENT_SECRET
+ ini:
+ - section: dsv_lookup
+ key: client_secret
+ required: true
+ url_template:
+ default: https://{}.secretsvaultcloud.{}/v1
+ description: The path to prepend to the base URL to form a valid REST API request.
+ type: string
+ env:
+ - name: DSV_URL_TEMPLATE
+ ini:
+ - section: dsv_lookup
+ key: url_template
+ required: false
"""
RETURN = r"""
_list:
- description:
- - One or more JSON responses to C(GET /secrets/{path}).
- - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret).
- type: list
- elements: dict
+ description:
+ - One or more JSON responses to C(GET /secrets/{path}).
+ - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret).
+ type: list
+ elements: dict
"""
EXAMPLES = r"""
- hosts: localhost
vars:
- secret: "{{ lookup('community.general.dsv', '/test/secret') }}"
+ secret: "{{ lookup('community.general.dsv', '/test/secret') }}"
tasks:
- - ansible.builtin.debug:
- msg: 'the password is {{ secret["data"]["password"] }}'
+ - ansible.builtin.debug:
+ msg: 'the password is {{ secret["data"]["password"] }}'
"""
from ansible.errors import AnsibleError, AnsibleOptionsError
diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py
index 1e7dc3c960..d8d992e79f 100644
--- a/plugins/lookup/etcd.py
+++ b/plugins/lookup/etcd.py
@@ -8,46 +8,46 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author:
- - Jan-Piet Mens (@jpmens)
- name: etcd
- short_description: get info from an etcd server
+DOCUMENTATION = r"""
+author:
+ - Jan-Piet Mens (@jpmens)
+name: etcd
+short_description: Get info from an etcd server
+description:
+ - Retrieves data from an etcd server.
+options:
+ _terms:
description:
- - Retrieves data from an etcd server
- options:
- _terms:
- description:
- - the list of keys to lookup on the etcd server
- type: list
- elements: string
- required: true
- url:
- description:
- - Environment variable with the URL for the etcd server
- type: string
- default: 'http://127.0.0.1:4001'
- env:
- - name: ANSIBLE_ETCD_URL
- version:
- description:
- - Environment variable with the etcd protocol version
- type: string
- default: 'v1'
- env:
- - name: ANSIBLE_ETCD_VERSION
- validate_certs:
- description:
- - toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs.
- default: true
- type: boolean
- seealso:
- - module: community.general.etcd3
- - plugin: community.general.etcd3
- plugin_type: lookup
-'''
+ - The list of keys to lookup on the etcd server.
+ type: list
+ elements: string
+ required: true
+ url:
+ description:
+ - Environment variable with the URL for the etcd server.
+ type: string
+ default: 'http://127.0.0.1:4001'
+ env:
+ - name: ANSIBLE_ETCD_URL
+ version:
+ description:
+ - Environment variable with the etcd protocol version.
+ type: string
+ default: 'v1'
+ env:
+ - name: ANSIBLE_ETCD_VERSION
+ validate_certs:
+ description:
+ - Toggle checking that the SSL certificates are valid; you normally only want to turn this off with self-signed certs.
+ default: true
+ type: boolean
+seealso:
+ - module: community.general.etcd3
+ - plugin: community.general.etcd3
+ plugin_type: lookup
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: "a value from a locally running etcd"
ansible.builtin.debug:
msg: "{{ lookup('community.general.etcd', 'foo/bar') }}"
@@ -59,15 +59,15 @@ EXAMPLES = '''
- name: "you can set server options inline"
ansible.builtin.debug:
msg: "{{ lookup('community.general.etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}"
-'''
+"""
-RETURN = '''
- _raw:
- description:
- - List of values associated with input keys.
- type: list
- elements: string
-'''
+RETURN = r"""
+_raw:
+ description:
+ - List of values associated with input keys.
+ type: list
+ elements: string
+"""
import json
diff --git a/plugins/lookup/etcd3.py b/plugins/lookup/etcd3.py
index c67e975b97..2af1e9052b 100644
--- a/plugins/lookup/etcd3.py
+++ b/plugins/lookup/etcd3.py
@@ -7,101 +7,101 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
- author:
- - Eric Belhomme (@eric-belhomme)
- version_added: '0.2.0'
- name: etcd3
- short_description: Get key values from etcd3 server
+DOCUMENTATION = r"""
+author:
+ - Eric Belhomme (@eric-belhomme)
+version_added: '0.2.0'
+name: etcd3
+short_description: Get key values from etcd3 server
+description:
+ - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API.
+ - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some E(ETCDCTL_*) environment
+ variables.
+ - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview.
+options:
+ _terms:
description:
- - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API.
- - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some C(ETCDCTL_*) environment variables.
- - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview.
+ - The list of keys (or key prefixes) to look up on the etcd3 server.
+ type: list
+ elements: str
+ required: true
+ prefix:
+ description:
+ - Look for key or prefix key.
+ type: bool
+ default: false
+ endpoints:
+ description:
+ - Counterpart of E(ETCDCTL_ENDPOINTS) environment variable. Specify the etcd3 connection with a URL form, for example
+ V(https://hostname:2379), or V(:) form.
+ - The V(host) part is overwritten by O(host) option, if defined.
+ - The V(port) part is overwritten by O(port) option, if defined.
+ env:
+ - name: ETCDCTL_ENDPOINTS
+ default: '127.0.0.1:2379'
+ type: str
+ host:
+ description:
+ - Etcd3 listening client host.
+ - Takes precedence over O(endpoints).
+ type: str
+ port:
+ description:
+ - Etcd3 listening client port.
+ - Takes precedence over O(endpoints).
+ type: int
+ ca_cert:
+ description:
+ - Etcd3 CA authority.
+ env:
+ - name: ETCDCTL_CACERT
+ type: str
+ cert_cert:
+ description:
+ - Etcd3 client certificate.
+ env:
+ - name: ETCDCTL_CERT
+ type: str
+ cert_key:
+ description:
+ - Etcd3 client private key.
+ env:
+ - name: ETCDCTL_KEY
+ type: str
+ timeout:
+ description:
+ - Client timeout.
+ default: 60
+ env:
+ - name: ETCDCTL_DIAL_TIMEOUT
+ type: int
+ user:
+ description:
+ - Authenticated user name.
+ env:
+ - name: ETCDCTL_USER
+ type: str
+ password:
+ description:
+ - Authenticated user password.
+ env:
+ - name: ETCDCTL_PASSWORD
+ type: str
- options:
- _terms:
- description:
- - The list of keys (or key prefixes) to look up on the etcd3 server.
- type: list
- elements: str
- required: true
- prefix:
- description:
- - Look for key or prefix key.
- type: bool
- default: false
- endpoints:
- description:
- - Counterpart of E(ETCDCTL_ENDPOINTS) environment variable.
- Specify the etcd3 connection with and URL form, for example V(https://hostname:2379), or V(:) form.
- - The V(host) part is overwritten by O(host) option, if defined.
- - The V(port) part is overwritten by O(port) option, if defined.
- env:
- - name: ETCDCTL_ENDPOINTS
- default: '127.0.0.1:2379'
- type: str
- host:
- description:
- - etcd3 listening client host.
- - Takes precedence over O(endpoints).
- type: str
- port:
- description:
- - etcd3 listening client port.
- - Takes precedence over O(endpoints).
- type: int
- ca_cert:
- description:
- - etcd3 CA authority.
- env:
- - name: ETCDCTL_CACERT
- type: str
- cert_cert:
- description:
- - etcd3 client certificate.
- env:
- - name: ETCDCTL_CERT
- type: str
- cert_key:
- description:
- - etcd3 client private key.
- env:
- - name: ETCDCTL_KEY
- type: str
- timeout:
- description:
- - Client timeout.
- default: 60
- env:
- - name: ETCDCTL_DIAL_TIMEOUT
- type: int
- user:
- description:
- - Authenticated user name.
- env:
- - name: ETCDCTL_USER
- type: str
- password:
- description:
- - Authenticated user password.
- env:
- - name: ETCDCTL_PASSWORD
- type: str
+notes:
+ - O(host) and O(port) options take precedence over the O(endpoints) option.
+ - The recommended way to connect to the etcd3 server is using the E(ETCDCTL_ENDPOINTS) environment variable and keep O(endpoints),
+ O(host), and O(port) unused.
+seealso:
+ - module: community.general.etcd3
+ - plugin: community.general.etcd
+ plugin_type: lookup
- notes:
- - O(host) and O(port) options take precedence over (endpoints) option.
- - The recommended way to connect to etcd3 server is using E(ETCDCTL_ENDPOINT)
- environment variable and keep O(endpoints), O(host), and O(port) unused.
- seealso:
- - module: community.general.etcd3
- - plugin: community.general.etcd
- plugin_type: lookup
+requirements:
+ - "etcd3 >= 0.10"
+"""
- requirements:
- - "etcd3 >= 0.10"
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
- name: "a value from a locally running etcd"
ansible.builtin.debug:
msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}"
@@ -117,22 +117,22 @@ EXAMPLES = '''
- name: "connect to etcd3 with a client certificate"
ansible.builtin.debug:
msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}"
-'''
+"""
-RETURN = '''
- _raw:
- description:
- - List of keys and associated values.
- type: list
- elements: dict
- contains:
- key:
- description: The element's key.
- type: str
- value:
- description: The element's value.
- type: str
-'''
+RETURN = r"""
+_raw:
+ description:
+ - List of keys and associated values.
+ type: list
+ elements: dict
+ contains:
+ key:
+ description: The element's key.
+ type: str
+ value:
+ description: The element's value.
+ type: str
+"""
import re
diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py
index 3036e152c2..24e0c20eea 100644
--- a/plugins/lookup/filetree.py
+++ b/plugins/lookup/filetree.py
@@ -6,22 +6,23 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
name: filetree
author: Dag Wieers (@dagwieers)
-short_description: recursively match all files in a directory tree
+short_description: Recursively match all files in a directory tree
description:
-- This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership.
-- Supports directories, files and symlinks, including SELinux and other file properties.
-- If you provide more than one path, it will implement a first_found logic, and will not process entries it already processed in previous paths.
- This enables merging different trees in order of importance, or add role_vars to specific paths to influence different instances of the same role.
+ - This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership.
+ - Supports directories, files and symlinks, including SELinux and other file properties.
+ - If you provide more than one path, it implements a first_found logic, and does not process entries it already processed
+ in previous paths. This enables merging different trees in order of importance, or add role_vars to specific paths to
+ influence different instances of the same role.
options:
_terms:
description: Path(s) of files to read.
required: true
type: list
elements: string
-'''
+"""
EXAMPLES = r"""
- name: Create directories
@@ -59,61 +60,61 @@ EXAMPLES = r"""
"""
RETURN = r"""
- _raw:
- description: List of dictionaries with file information.
- type: list
- elements: dict
- contains:
- src:
- description:
- - Full path to file.
- - Not returned when RV(_raw[].state) is set to V(directory).
- type: path
- root:
- description: Allows filtering by original location.
- type: path
- path:
- description: Contains the relative path to root.
- type: path
- mode:
- description: The permissions the resulting file or directory.
- type: str
- state:
- description: TODO
- type: str
- owner:
- description: Name of the user that owns the file/directory.
- type: raw
- group:
- description: Name of the group that owns the file/directory.
- type: raw
- seuser:
- description: The user part of the SELinux file context.
- type: raw
- serole:
- description: The role part of the SELinux file context.
- type: raw
- setype:
- description: The type part of the SELinux file context.
- type: raw
- selevel:
- description: The level part of the SELinux file context.
- type: raw
- uid:
- description: Owner ID of the file/directory.
- type: int
- gid:
- description: Group ID of the file/directory.
- type: int
- size:
- description: Size of the target.
- type: int
- mtime:
- description: Time of last modification.
- type: float
- ctime:
- description: Time of last metadata update or creation (depends on OS).
- type: float
+_raw:
+ description: List of dictionaries with file information.
+ type: list
+ elements: dict
+ contains:
+ src:
+ description:
+ - Full path to file.
+ - Not returned when RV(_raw[].state) is set to V(directory).
+ type: path
+ root:
+ description: Allows filtering by original location.
+ type: path
+ path:
+ description: Contains the relative path to root.
+ type: path
+ mode:
+ description: The permissions the resulting file or directory.
+ type: str
+ state:
+ description: TODO.
+ type: str
+ owner:
+ description: Name of the user that owns the file/directory.
+ type: raw
+ group:
+ description: Name of the group that owns the file/directory.
+ type: raw
+ seuser:
+ description: The user part of the SELinux file context.
+ type: raw
+ serole:
+ description: The role part of the SELinux file context.
+ type: raw
+ setype:
+ description: The type part of the SELinux file context.
+ type: raw
+ selevel:
+ description: The level part of the SELinux file context.
+ type: raw
+ uid:
+ description: Owner ID of the file/directory.
+ type: int
+ gid:
+ description: Group ID of the file/directory.
+ type: int
+ size:
+ description: Size of the target.
+ type: int
+ mtime:
+ description: Time of last modification.
+ type: float
+ ctime:
+ description: Time of last metadata update or creation (depends on OS).
+ type: float
"""
import os
import pwd
diff --git a/plugins/lookup/flattened.py b/plugins/lookup/flattened.py
index 5365f2ca99..de4a21fbdd 100644
--- a/plugins/lookup/flattened.py
+++ b/plugins/lookup/flattened.py
@@ -6,35 +6,35 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: flattened
- author: Serge van Ginderachter (!UNKNOWN)
- short_description: return single list completely flattened
- description:
- - Given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left.
- options:
- _terms:
- description: lists to flatten
- type: list
- elements: raw
- required: true
- notes:
- - Unlike the P(ansible.builtin.items#lookup) lookup which only flattens 1 level,
- this plugin will continue to flatten until it cannot find lists anymore.
- - Aka highlander plugin, there can only be one (list).
-'''
+DOCUMENTATION = r"""
+name: flattened
+author: Serge van Ginderachter (!UNKNOWN)
+short_description: Return single list completely flattened
+description:
+ - Given one or more lists, this lookup flattens any list elements found recursively until only 1 list is left.
+options:
+ _terms:
+ description: Lists to flatten.
+ type: list
+ elements: raw
+ required: true
+notes:
+ - Unlike the P(ansible.builtin.items#lookup) lookup which only flattens 1 level, this plugin continues to flatten until
+ it cannot find lists anymore.
+ - Aka highlander plugin, there can only be one (list).
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: "'unnest' all elements into single list"
ansible.builtin.debug:
msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], ['a','b','c'], [[5,6,1,3], [34,'a','b','c']])}}"
"""
-RETURN = """
- _raw:
- description:
- - flattened list
- type: list
+RETURN = r"""
+_raw:
+ description:
+ - Flattened list.
+ type: list
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
@@ -67,12 +67,7 @@ class LookupModule(LookupBase):
if isinstance(term, string_types):
# convert a variable to a list
- try:
- term2 = listify_lookup_plugin_terms(term, templar=self._templar)
- except TypeError:
- # The loader argument is deprecated in ansible-core 2.14+. Fall back to
- # pre-2.14 behavior for older ansible-core versions.
- term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
+ term2 = listify_lookup_plugin_terms(term, templar=self._templar)
# but avoid converting a plain string to a list of one string
if term2 != [term]:
term = term2
diff --git a/plugins/lookup/github_app_access_token.py b/plugins/lookup/github_app_access_token.py
index 73fd09a0a9..dbc8cde3b5 100644
--- a/plugins/lookup/github_app_access_token.py
+++ b/plugins/lookup/github_app_access_token.py
@@ -5,49 +5,49 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: github_app_access_token
- author:
- - Poh Wei Sheng (@weisheng-p)
- short_description: Obtain short-lived Github App Access tokens
- version_added: '8.2.0'
- requirements:
- - jwt (https://github.com/GehirnInc/python-jwt)
+DOCUMENTATION = r"""
+name: github_app_access_token
+author:
+ - Poh Wei Sheng (@weisheng-p)
+short_description: Obtain short-lived Github App Access tokens
+version_added: '8.2.0'
+requirements:
+ - jwt (https://github.com/GehirnInc/python-jwt)
+description:
+ - This generates a Github access token that can be used with a C(git) command, if you use a Github App.
+options:
+ key_path:
description:
- - This generates a Github access token that can be used with a C(git) command, if you use a Github App.
- options:
- key_path:
- description:
- - Path to your private key.
- - Either O(key_path) or O(private_key) must be specified.
- type: path
- app_id:
- description:
- - Your GitHub App ID, you can find this in the Settings page.
- required: true
- type: str
- installation_id:
- description:
- - The installation ID that contains the git repository you would like access to.
- - As of 2023-12-24, this can be found via Settings page > Integrations > Application. The last part of the URL in the
- configure button is the installation ID.
- - Alternatively, you can use PyGithub (U(https://github.com/PyGithub/PyGithub)) to get your installation ID.
- required: true
- type: str
- private_key:
- description:
- - GitHub App private key in PEM file format as string.
- - Either O(key_path) or O(private_key) must be specified.
- type: str
- version_added: 10.0.0
- token_expiry:
- description:
- - How long the token should last for in seconds.
- default: 600
- type: int
-'''
+ - Path to your private key.
+ - Either O(key_path) or O(private_key) must be specified.
+ type: path
+ app_id:
+ description:
+ - Your GitHub App ID, you can find this in the Settings page.
+ required: true
+ type: str
+ installation_id:
+ description:
+ - The installation ID that contains the git repository you would like access to.
+ - As of 2023-12-24, this can be found at Settings page > Integrations > Application. The last part of the URL in the
+ configure button is the installation ID.
+ - Alternatively, you can use PyGithub (U(https://github.com/PyGithub/PyGithub)) to get your installation ID.
+ required: true
+ type: str
+ private_key:
+ description:
+ - GitHub App private key in PEM file format as string.
+ - Either O(key_path) or O(private_key) must be specified.
+ type: str
+ version_added: 10.0.0
+ token_expiry:
+ description:
+ - How long the token should last for in seconds.
+ default: 600
+ type: int
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
- name: Get access token to be used for git checkout with app_id=123456, installation_id=64209
ansible.builtin.git:
repo: >-
@@ -57,14 +57,14 @@ EXAMPLES = '''
github_token: >-
{{ lookup('community.general.github_app_access_token', key_path='/home/to_your/key',
app_id='123456', installation_id='64209') }}
-'''
+"""
-RETURN = '''
- _raw:
- description: A one-element list containing your GitHub access token.
- type: list
- elements: str
-'''
+RETURN = r"""
+_raw:
+ description: A one-element list containing your GitHub access token.
+ type: list
+ elements: str
+"""
try:
diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py
index 8463a8844e..27f133d78a 100644
--- a/plugins/lookup/hiera.py
+++ b/plugins/lookup/hiera.py
@@ -6,40 +6,40 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- author:
- - Juan Manuel Parrilla (@jparrill)
- name: hiera
- short_description: get info from hiera data
- requirements:
- - hiera (command line utility)
+DOCUMENTATION = r"""
+author:
+ - Juan Manuel Parrilla (@jparrill)
+name: hiera
+short_description: Get info from hiera data
+requirements:
+ - hiera (command line utility)
+description:
+ - Retrieves data from a Puppetmaster node using Hiera as ENC.
+options:
+ _terms:
description:
- - Retrieves data from an Puppetmaster node using Hiera as ENC.
- options:
- _terms:
- description:
- - The list of keys to lookup on the Puppetmaster.
- type: list
- elements: string
- required: true
- executable:
- description:
- - Binary file to execute Hiera.
- type: string
- default: '/usr/bin/hiera'
- env:
- - name: ANSIBLE_HIERA_BIN
- config_file:
- description:
- - File that describes the hierarchy of Hiera.
- type: string
- default: '/etc/hiera.yaml'
- env:
- - name: ANSIBLE_HIERA_CFG
+ - The list of keys to lookup on the Puppetmaster.
+ type: list
+ elements: string
+ required: true
+ executable:
+ description:
+ - Binary file to execute Hiera.
+ type: string
+ default: '/usr/bin/hiera'
+ env:
+ - name: ANSIBLE_HIERA_BIN
+ config_file:
+ description:
+ - File that describes the hierarchy of Hiera.
+ type: string
+ default: '/etc/hiera.yaml'
+ env:
+ - name: ANSIBLE_HIERA_CFG
# FIXME: incomplete options .. _terms? environment/fqdn?
-'''
+"""
-EXAMPLES = """
+EXAMPLES = r"""
# All this examples depends on hiera.yml that describes the hierarchy
- name: "a value from Hiera 'DB'"
@@ -55,12 +55,12 @@ EXAMPLES = """
msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}"
"""
-RETURN = """
- _raw:
- description:
- - a value associated with input key
- type: list
- elements: str
+RETURN = r"""
+_raw:
+ description:
+ - A value associated with the input key.
+ type: list
+ elements: str
"""
from ansible.plugins.lookup import LookupBase
diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py
index ebc35a8ee1..75d808e736 100644
--- a/plugins/lookup/keyring.py
+++ b/plugins/lookup/keyring.py
@@ -7,18 +7,18 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: keyring
- author:
- - Samuel Boucher (!UNKNOWN)
- requirements:
- - keyring (python library)
- short_description: grab secrets from the OS keyring
- description:
- - Allows you to access data stored in the OS provided keyring/keychain.
-'''
+DOCUMENTATION = r"""
+name: keyring
+author:
+ - Samuel Boucher (!UNKNOWN)
+requirements:
+ - keyring (python library)
+short_description: Grab secrets from the OS keyring
+description:
+ - Allows you to access data stored in the OS provided keyring/keychain.
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: output secrets to screen (BAD IDEA)
ansible.builtin.debug:
msg: "Password: {{item}}"
@@ -31,11 +31,11 @@ EXAMPLES = """
login_user: joe
"""
-RETURN = """
- _raw:
- description: Secrets stored.
- type: list
- elements: str
+RETURN = r"""
+_raw:
+ description: Secrets stored.
+ type: list
+ elements: str
"""
HAS_KEYRING = True
diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py
index 70ef8d1414..2633848937 100644
--- a/plugins/lookup/lastpass.py
+++ b/plugins/lookup/lastpass.py
@@ -6,39 +6,39 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: lastpass
- author:
- - Andrew Zenk (!UNKNOWN)
- requirements:
- - lpass (command line utility)
- - must have already logged into LastPass
- short_description: fetch data from LastPass
- description:
- - Use the lpass command line utility to fetch specific fields from LastPass.
- options:
- _terms:
- description: Key from which you want to retrieve the field.
- required: true
- type: list
- elements: str
- field:
- description: Field to return from LastPass.
- default: 'password'
- type: str
-'''
+DOCUMENTATION = r"""
+name: lastpass
+author:
+ - Andrew Zenk (!UNKNOWN)
+requirements:
+ - lpass (command line utility)
+ - must have already logged into LastPass
+short_description: Fetch data from LastPass
+description:
+ - Use the lpass command line utility to fetch specific fields from LastPass.
+options:
+ _terms:
+ description: Key from which you want to retrieve the field.
+ required: true
+ type: list
+ elements: str
+ field:
+ description: Field to return from LastPass.
+ default: 'password'
+ type: str
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: get 'custom_field' from LastPass entry 'entry-name'
ansible.builtin.debug:
msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}"
"""
-RETURN = """
- _raw:
- description: secrets stored
- type: list
- elements: str
+RETURN = r"""
+_raw:
+ description: Secrets stored.
+ type: list
+ elements: str
"""
from subprocess import Popen, PIPE
diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py
index c09321d081..b3728abb17 100644
--- a/plugins/lookup/lmdb_kv.py
+++ b/plugins/lookup/lmdb_kv.py
@@ -6,30 +6,30 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: lmdb_kv
- author:
- - Jan-Piet Mens (@jpmens)
- version_added: '0.2.0'
- short_description: fetch data from LMDB
- description:
- - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it.
- requirements:
- - lmdb (Python library U(https://lmdb.readthedocs.io/en/release/))
- options:
- _terms:
- description: List of keys to query.
- type: list
- elements: str
- db:
- description: Path to LMDB database.
- type: str
- default: 'ansible.mdb'
- vars:
- - name: lmdb_kv_db
-'''
+DOCUMENTATION = r"""
+name: lmdb_kv
+author:
+ - Jan-Piet Mens (@jpmens)
+version_added: '0.2.0'
+short_description: Fetch data from LMDB
+description:
+ - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it.
+requirements:
+ - lmdb (Python library U(https://lmdb.readthedocs.io/en/release/))
+options:
+ _terms:
+ description: List of keys to query.
+ type: list
+ elements: str
+ db:
+ description: Path to LMDB database.
+ type: str
+ default: 'ansible.mdb'
+ vars:
+ - name: lmdb_kv_db
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: query LMDB for a list of country codes
ansible.builtin.debug:
msg: "{{ query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}"
@@ -40,7 +40,7 @@ EXAMPLES = """
vars:
- lmdb_kv_db: jp.mdb
with_community.general.lmdb_kv:
- - "n*"
+ - "n*"
- name: get an item by key
ansible.builtin.assert:
@@ -52,9 +52,9 @@ EXAMPLES = """
- be
"""
-RETURN = """
+RETURN = r"""
_raw:
- description: value(s) stored in LMDB
+ description: Value(s) stored in LMDB.
type: list
elements: raw
"""
diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py
deleted file mode 100644
index 08e63fd7ee..0000000000
--- a/plugins/lookup/manifold.py
+++ /dev/null
@@ -1,282 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2018, Arigato Machine Inc.
-# Copyright (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author:
- - Kyrylo Galanov (!UNKNOWN)
- name: manifold
- short_description: get credentials from Manifold.co
- description:
- - Retrieves resources' credentials from Manifold.co
- deprecated:
- removed_in: 11.0.0
- why: Manifold (the company) has been acquired in 2021 and the services used by this plugin are no longer operational.
- alternative: There is none.
- options:
- _terms:
- description:
- - Optional list of resource labels to lookup on Manifold.co. If no resources are specified, all
- matched resources will be returned.
- type: list
- elements: string
- required: false
- api_token:
- description:
- - manifold API token
- type: string
- required: true
- env:
- - name: MANIFOLD_API_TOKEN
- project:
- description:
- - The project label you want to get the resource for.
- type: string
- required: false
- team:
- description:
- - The team label you want to get the resource for.
- type: string
- required: false
-'''
-
-EXAMPLES = '''
- - name: all available resources
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}"
- - name: all available resources for a specific project in specific team
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}"
- - name: two specific resources
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}"
-'''
-
-RETURN = '''
- _raw:
- description:
- - dictionary of credentials ready to be consumed as environment variables. If multiple resources define
- the same environment variable(s), the last one returned by the Manifold API will take precedence.
- type: dict
-'''
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
-from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils import six
-from ansible.utils.display import Display
-from traceback import format_exception
-import json
-import sys
-
-display = Display()
-
-
-class ApiError(Exception):
- pass
-
-
-class ManifoldApiClient(object):
- http_agent = 'python-manifold-ansible-1.0.0'
-
- def __init__(self, token):
- self._token = token
-
- def _make_url(self, api, endpoint):
- return f'https://api.{api}.manifold.co/v1/{endpoint}'
-
- def request(self, api, endpoint, *args, **kwargs):
- """
- Send a request to API backend and pre-process a response.
- :param api: API to send a request to
- :type api: str
- :param endpoint: API endpoint to fetch data from
- :type endpoint: str
- :param args: other args for open_url
- :param kwargs: other kwargs for open_url
- :return: server response. JSON response is automatically deserialized.
- :rtype: dict | list | str
- """
-
- default_headers = {
- 'Authorization': f"Bearer {self._token}",
- 'Accept': "*/*" # Otherwise server doesn't set content-type header
- }
-
- url = self._make_url(api, endpoint)
-
- headers = default_headers
- arg_headers = kwargs.pop('headers', None)
- if arg_headers:
- headers.update(arg_headers)
-
- try:
- display.vvvv(f'manifold lookup connecting to {url}')
- response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs)
- data = response.read()
- if response.headers.get('content-type') == 'application/json':
- data = json.loads(data)
- return data
- except ValueError:
- raise ApiError(f'JSON response can\'t be parsed while requesting {url}:\n{data}')
- except HTTPError as e:
- raise ApiError(f'Server returned: {e} while requesting {url}:\n{e.read()}')
- except URLError as e:
- raise ApiError(f'Failed lookup url for {url} : {e}')
- except SSLValidationError as e:
- raise ApiError(f'Error validating the server\'s certificate for {url}: {e}')
- except ConnectionError as e:
- raise ApiError(f'Error connecting to {url}: {e}')
-
- def get_resources(self, team_id=None, project_id=None, label=None):
- """
- Get resources list
- :param team_id: ID of the Team to filter resources by
- :type team_id: str
- :param project_id: ID of the project to filter resources by
- :type project_id: str
- :param label: filter resources by a label, returns a list with one or zero elements
- :type label: str
- :return: list of resources
- :rtype: list
- """
- api = 'marketplace'
- endpoint = 'resources'
- query_params = {}
-
- if team_id:
- query_params['team_id'] = team_id
- if project_id:
- query_params['project_id'] = project_id
- if label:
- query_params['label'] = label
-
- if query_params:
- endpoint += f"?{urlencode(query_params)}"
-
- return self.request(api, endpoint)
-
- def get_teams(self, label=None):
- """
- Get teams list
- :param label: filter teams by a label, returns a list with one or zero elements
- :type label: str
- :return: list of teams
- :rtype: list
- """
- api = 'identity'
- endpoint = 'teams'
- data = self.request(api, endpoint)
- # Label filtering is not supported by API, however this function provides uniform interface
- if label:
- data = list(filter(lambda x: x['body']['label'] == label, data))
- return data
-
- def get_projects(self, label=None):
- """
- Get projects list
- :param label: filter projects by a label, returns a list with one or zero elements
- :type label: str
- :return: list of projects
- :rtype: list
- """
- api = 'marketplace'
- endpoint = 'projects'
- query_params = {}
-
- if label:
- query_params['label'] = label
-
- if query_params:
- endpoint += f"?{urlencode(query_params)}"
-
- return self.request(api, endpoint)
-
- def get_credentials(self, resource_id):
- """
- Get resource credentials
- :param resource_id: ID of the resource to filter credentials by
- :type resource_id: str
- :return:
- """
- api = 'marketplace'
- endpoint = f"credentials?{urlencode({'resource_id': resource_id})}"
- return self.request(api, endpoint)
-
-
-class LookupModule(LookupBase):
-
- def run(self, terms, variables=None, **kwargs):
- """
- :param terms: a list of resources lookups to run.
- :param variables: ansible variables active at the time of the lookup
- :param api_token: API token
- :param project: optional project label
- :param team: optional team label
- :return: a dictionary of resources credentials
- """
-
- self.set_options(var_options=variables, direct=kwargs)
-
- api_token = self.get_option('api_token')
- project = self.get_option('project')
- team = self.get_option('team')
-
- try:
- labels = terms
- client = ManifoldApiClient(api_token)
-
- if team:
- team_data = client.get_teams(team)
- if len(team_data) == 0:
- raise AnsibleError(f"Team '{team}' does not exist")
- team_id = team_data[0]['id']
- else:
- team_id = None
-
- if project:
- project_data = client.get_projects(project)
- if len(project_data) == 0:
- raise AnsibleError(f"Project '{project}' does not exist")
- project_id = project_data[0]['id']
- else:
- project_id = None
-
- if len(labels) == 1: # Use server-side filtering if one resource is requested
- resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0])
- else: # Get all resources and optionally filter labels
- resources_data = client.get_resources(team_id=team_id, project_id=project_id)
- if labels:
- resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data))
-
- if labels and len(resources_data) < len(labels):
- fetched_labels = [r['body']['label'] for r in resources_data]
- not_found_labels = [label for label in labels if label not in fetched_labels]
- raise AnsibleError(f"Resource(s) {', '.join(not_found_labels)} do not exist")
-
- credentials = {}
- cred_map = {}
- for resource in resources_data:
- resource_credentials = client.get_credentials(resource['id'])
- if len(resource_credentials) and resource_credentials[0]['body']['values']:
- for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']):
- label = resource['body']['label']
- if cred_key in credentials:
- display.warning(f"'{cred_key}' with label '{cred_map[cred_key]}' was replaced by resource data with label '{label}'")
- credentials[cred_key] = cred_val
- cred_map[cred_key] = label
-
- ret = [credentials]
- return ret
- except ApiError as e:
- raise AnsibleError(f'API Error: {e}')
- except AnsibleError as e:
- raise e
- except Exception:
- exc_type, exc_value, exc_traceback = sys.exc_info()
- raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback))
diff --git a/plugins/lookup/merge_variables.py b/plugins/lookup/merge_variables.py
index e352524292..aff70f9799 100644
--- a/plugins/lookup/merge_variables.py
+++ b/plugins/lookup/merge_variables.py
@@ -6,72 +6,71 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = """
- author:
- - Roy Lenferink (@rlenferink)
- - Mark Ettema (@m-a-r-k-e)
- - Alexander Petrenz (@alpex8)
- name: merge_variables
- short_description: merge variables whose names match a given pattern
+DOCUMENTATION = r"""
+author:
+ - Roy Lenferink (@rlenferink)
+ - Mark Ettema (@m-a-r-k-e)
+ - Alexander Petrenz (@alpex8)
+name: merge_variables
+short_description: Merge variables whose names match a given pattern
+description:
+ - This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or regular expressions,
+ optionally.
+version_added: 6.5.0
+options:
+ _terms:
description:
- - This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or
- regular expressions, optionally.
- version_added: 6.5.0
- options:
- _terms:
- description:
- - Depending on the value of O(pattern_type), this is a list of prefixes, suffixes, or regular expressions
- that will be used to match all variables that should be merged.
- required: true
- type: list
- elements: str
- pattern_type:
- description:
- - Change the way of searching for the specified pattern.
- type: str
- default: 'regex'
- choices:
- - prefix
- - suffix
- - regex
- env:
- - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE
- ini:
- - section: merge_variables_lookup
- key: pattern_type
- initial_value:
- description:
- - An initial value to start with.
- type: raw
- override:
- description:
- - Return an error, print a warning or ignore it when a key will be overwritten.
- - The default behavior V(error) makes the plugin fail when a key would be overwritten.
- - When V(warn) and V(ignore) are used, note that it is important to know that the variables
- are sorted by name before being merged. Keys for later variables in this order will overwrite
- keys of the same name for variables earlier in this order. To avoid potential confusion,
- better use O(override=error) whenever possible.
- type: str
- default: 'error'
- choices:
- - error
- - warn
- - ignore
- env:
- - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE
- ini:
- - section: merge_variables_lookup
- key: override
- groups:
- description:
- - Search for variables accross hosts that belong to the given groups. This allows to collect configuration pieces
- accross different hosts (for example a service on a host with its database on another host).
- type: list
- elements: str
- version_added: 8.5.0
+ - Depending on the value of O(pattern_type), this is a list of prefixes, suffixes, or regular expressions that is used
+ to match all variables that should be merged.
+ required: true
+ type: list
+ elements: str
+ pattern_type:
+ description:
+ - Change the way of searching for the specified pattern.
+ type: str
+ default: 'regex'
+ choices:
+ - prefix
+ - suffix
+ - regex
+ env:
+ - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE
+ ini:
+ - section: merge_variables_lookup
+ key: pattern_type
+ initial_value:
+ description:
+ - An initial value to start with.
+ type: raw
+ override:
+ description:
+ - Return an error, print a warning or ignore it when a key is overwritten.
+ - The default behavior V(error) makes the plugin fail when a key would be overwritten.
+ - When V(warn) and V(ignore) are used, note that it is important to know that the variables are sorted by name before
+ being merged. Keys for later variables in this order overwrite keys of the same name for variables earlier in this
+ order. To avoid potential confusion, better use O(override=error) whenever possible.
+ type: str
+ default: 'error'
+ choices:
+ - error
+ - warn
+ - ignore
+ env:
+ - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE
+ ini:
+ - section: merge_variables_lookup
+ key: override
+ groups:
+ description:
+ - Search for variables across hosts that belong to the given groups. This allows to collect configuration pieces across
+ different hosts (for example a service on a host with its database on another host).
+ type: list
+ elements: str
+ version_added: 8.5.0
"""
-EXAMPLES = """
+EXAMPLES = r"""
# Some example variables, they can be defined anywhere as long as they are in scope
test_init_list:
- "list init item 1"
@@ -91,7 +90,6 @@ testb__test_dict:
ports:
- 3
-
# Merge variables that end with '__test_dict' and store the result in a variable 'example_a'
example_a: "{{ lookup('community.general.merge_variables', '__test_dict', pattern_type='suffix') }}"
@@ -100,7 +98,6 @@ example_a: "{{ lookup('community.general.merge_variables', '__test_dict', patter
# - 1
# - 3
-
# Merge variables that match the '^.+__test_list$' regular expression, starting with an initial value and store the
# result in a variable 'example_b'
example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', initial_value=test_init_list) }}"
@@ -112,12 +109,11 @@ example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', in
# - "test b item 1"
"""
-RETURN = """
- _raw:
- description: In case the search matches list items, a list will be returned. In case the search matches dicts, a
- dict will be returned.
- type: raw
- elements: raw
+RETURN = r"""
+_raw:
+ description: In case the search matches list items, a list is returned. In case the search matches dicts, a dict is returned.
+ type: raw
+ elements: raw
"""
import re
@@ -197,8 +193,8 @@ class LookupModule(LookupBase):
result = initial_value
for var_name in var_merge_names:
- with self._templar.set_temporary_context(available_variables=variables): # tmp. switch renderer to context of current variables
- var_value = self._templar.template(variables[var_name]) # Render jinja2 templates
+ temp_templar = self._templar.copy_with_new_env(available_variables=variables) # tmp. switch renderer to context of current variables
+ var_value = temp_templar.template(variables[var_name]) # Render jinja2 templates
var_type = _verify_and_get_type(var_value)
if prev_var_type is None:
diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py
index ce0179a31e..3dc589eaaf 100644
--- a/plugins/lookup/onepassword.py
+++ b/plugins/lookup/onepassword.py
@@ -8,39 +8,39 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: onepassword
- author:
- - Scott Buchanan (@scottsb)
- - Andrew Zenk (@azenk)
- - Sam Doran (@samdoran)
- short_description: Fetch field values from 1Password
- description:
- - P(community.general.onepassword#lookup) wraps the C(op) command line utility to fetch specific field values from 1Password.
- requirements:
- - C(op) 1Password command line utility
- options:
- _terms:
- description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
- required: true
- type: list
- elements: string
- account_id:
- version_added: 7.5.0
- domain:
- version_added: 3.2.0
- field:
- description: Field to return from each matching item (case-insensitive).
- default: 'password'
- type: str
- service_account_token:
- version_added: 7.1.0
- extends_documentation_fragment:
- - community.general.onepassword
- - community.general.onepassword.lookup
-'''
+DOCUMENTATION = r"""
+name: onepassword
+author:
+ - Scott Buchanan (@scottsb)
+ - Andrew Zenk (@azenk)
+ - Sam Doran (@samdoran)
+short_description: Fetch field values from 1Password
+description:
+ - P(community.general.onepassword#lookup) wraps the C(op) command line utility to fetch specific field values from 1Password.
+requirements:
+ - C(op) 1Password command line utility
+options:
+ _terms:
+ description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
+ required: true
+ type: list
+ elements: string
+ account_id:
+ version_added: 7.5.0
+ domain:
+ version_added: 3.2.0
+ field:
+ description: Field to return from each matching item (case-insensitive).
+ default: 'password'
+ type: str
+ service_account_token:
+ version_added: 7.1.0
+extends_documentation_fragment:
+ - community.general.onepassword
+ - community.general.onepassword.lookup
+"""
-EXAMPLES = """
+EXAMPLES = r"""
# These examples only work when already signed in to 1Password
- name: Retrieve password for KITT when already signed in to 1Password
ansible.builtin.debug:
@@ -56,32 +56,24 @@ EXAMPLES = """
- name: Retrieve password for HAL when not signed in to 1Password
ansible.builtin.debug:
- var: lookup('community.general.onepassword',
- 'HAL 9000',
- subdomain='Discovery',
- master_password=vault_master_password)
+ var: lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password)
- name: Retrieve password for HAL when never signed in to 1Password
ansible.builtin.debug:
- var: lookup('community.general.onepassword',
- 'HAL 9000',
- subdomain='Discovery',
- master_password=vault_master_password,
- username='tweety@acme.com',
- secret_key=vault_secret_key)
+ var: >-
+ lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password,
+ username='tweety@acme.com', secret_key=vault_secret_key)
- name: Retrieve password from specific account
ansible.builtin.debug:
- var: lookup('community.general.onepassword',
- 'HAL 9000',
- account_id='abc123')
+ var: lookup('community.general.onepassword', 'HAL 9000', account_id='abc123')
"""
-RETURN = """
- _raw:
- description: Field data requested.
- type: list
- elements: str
+RETURN = r"""
+_raw:
+ description: Field data requested.
+ type: list
+ elements: str
"""
import abc
diff --git a/plugins/lookup/onepassword_doc.py b/plugins/lookup/onepassword_doc.py
index 5ffcf02c69..82ca790a31 100644
--- a/plugins/lookup/onepassword_doc.py
+++ b/plugins/lookup/onepassword_doc.py
@@ -6,43 +6,43 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: onepassword_doc
- author:
- - Sam Doran (@samdoran)
- requirements:
- - C(op) 1Password command line utility version 2 or later.
- short_description: Fetch documents stored in 1Password
- version_added: "8.1.0"
- description:
- - P(community.general.onepassword_doc#lookup) wraps C(op) command line utility to fetch one or more documents from 1Password.
- notes:
- - The document contents are a string exactly as stored in 1Password.
- - This plugin requires C(op) version 2 or later.
+DOCUMENTATION = r"""
+name: onepassword_doc
+author:
+ - Sam Doran (@samdoran)
+requirements:
+ - C(op) 1Password command line utility version 2 or later.
+short_description: Fetch documents stored in 1Password
+version_added: "8.1.0"
+description:
+ - P(community.general.onepassword_doc#lookup) wraps C(op) command line utility to fetch one or more documents from 1Password.
+notes:
+ - The document contents are a string exactly as stored in 1Password.
+ - This plugin requires C(op) version 2 or later.
+options:
+ _terms:
+ description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
+ required: true
+ type: list
+ elements: string
- options:
- _terms:
- description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
- required: true
- type: list
- elements: string
+extends_documentation_fragment:
+ - community.general.onepassword
+ - community.general.onepassword.lookup
+"""
- extends_documentation_fragment:
- - community.general.onepassword
- - community.general.onepassword.lookup
-'''
-
-EXAMPLES = """
+EXAMPLES = r"""
+---
- name: Retrieve a private key from 1Password
ansible.builtin.debug:
var: lookup('community.general.onepassword_doc', 'Private key')
"""
-RETURN = """
- _raw:
- description: Requested document
- type: list
- elements: string
+RETURN = r"""
+_raw:
+ description: Requested document.
+ type: list
+ elements: string
"""
from ansible_collections.community.general.plugins.lookup.onepassword import OnePass, OnePassCLIv2
diff --git a/plugins/lookup/onepassword_raw.py b/plugins/lookup/onepassword_raw.py
index dc3e590329..2d9829ec9d 100644
--- a/plugins/lookup/onepassword_raw.py
+++ b/plugins/lookup/onepassword_raw.py
@@ -8,35 +8,36 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: onepassword_raw
- author:
- - Scott Buchanan (@scottsb)
- - Andrew Zenk (@azenk)
- - Sam Doran (@samdoran)
- requirements:
- - C(op) 1Password command line utility
- short_description: Fetch an entire item from 1Password
- description:
- - P(community.general.onepassword_raw#lookup) wraps C(op) command line utility to fetch an entire item from 1Password.
- options:
- _terms:
- description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
- required: true
- type: list
- elements: string
- account_id:
- version_added: 7.5.0
- domain:
- version_added: 6.0.0
- service_account_token:
- version_added: 7.1.0
- extends_documentation_fragment:
- - community.general.onepassword
- - community.general.onepassword.lookup
-'''
+DOCUMENTATION = r"""
+name: onepassword_raw
+author:
+ - Scott Buchanan (@scottsb)
+ - Andrew Zenk (@azenk)
+ - Sam Doran (@samdoran)
+requirements:
+ - C(op) 1Password command line utility
+short_description: Fetch an entire item from 1Password
+description:
+ - P(community.general.onepassword_raw#lookup) wraps C(op) command line utility to fetch an entire item from 1Password.
+options:
+ _terms:
+ description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
+ required: true
+ type: list
+ elements: string
+ account_id:
+ version_added: 7.5.0
+ domain:
+ version_added: 6.0.0
+ service_account_token:
+ version_added: 7.1.0
+extends_documentation_fragment:
+ - community.general.onepassword
+ - community.general.onepassword.lookup
+"""
-EXAMPLES = """
+EXAMPLES = r"""
+---
- name: Retrieve all data about Wintermute
ansible.builtin.debug:
var: lookup('community.general.onepassword_raw', 'Wintermute')
@@ -46,11 +47,11 @@ EXAMPLES = """
var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl')
"""
-RETURN = """
- _raw:
- description: Entire item requested.
- type: list
- elements: dict
+RETURN = r"""
+_raw:
+ description: Entire item requested.
+ type: list
+ elements: dict
"""
import json
diff --git a/plugins/lookup/onepassword_ssh_key.py b/plugins/lookup/onepassword_ssh_key.py
index 253d8c68f4..395de59f23 100644
--- a/plugins/lookup/onepassword_ssh_key.py
+++ b/plugins/lookup/onepassword_ssh_key.py
@@ -5,7 +5,7 @@
from __future__ import annotations
-DOCUMENTATION = """
+DOCUMENTATION = r"""
name: onepassword_ssh_key
author:
- Mohammed Babelly (@mohammedbabelly20)
@@ -19,7 +19,6 @@ notes:
- By default, it returns the private key value in PKCS#8 format, unless O(ssh_format=true) is passed.
- The pluging works only for C(SSHKEY) type items.
- This plugin requires C(op) version 2 or later.
-
options:
_terms:
description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve.
@@ -36,13 +35,14 @@ extends_documentation_fragment:
- community.general.onepassword.lookup
"""
-EXAMPLES = """
+EXAMPLES = r"""
+---
- name: Retrieve the private SSH key from 1Password
ansible.builtin.debug:
msg: "{{ lookup('community.general.onepassword_ssh_key', 'SSH Key', ssh_format=true) }}"
"""
-RETURN = """
+RETURN = r"""
_raw:
description: Private key of SSH keypair.
type: list
diff --git a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py
index 479f8d537a..8f87e87034 100644
--- a/plugins/lookup/passwordstore.py
+++ b/plugins/lookup/passwordstore.py
@@ -7,167 +7,168 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: passwordstore
- author:
- - Patrick Deelman (!UNKNOWN)
- short_description: manage passwords with passwordstore.org's pass utility
+DOCUMENTATION = r"""
+name: passwordstore
+author:
+ - Patrick Deelman (!UNKNOWN)
+short_description: Manage passwords with passwordstore.org's pass utility
+description:
+ - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. It can also retrieve,
+ create or update YAML style keys stored as multilines in the passwordfile.
+ - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to C(~/.gnupg/gpg-agent.conf). Where
+ this is not possible, consider using O(lock=readwrite) instead.
+options:
+ _terms:
+ description: Query key.
+ required: true
+ directory:
description:
- - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
- It can also retrieve, create or update YAML style keys stored as multilines in the passwordfile.
- - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to
- C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using O(lock=readwrite) instead.
- options:
- _terms:
- description: query key.
- required: true
- directory:
- description:
- - The directory of the password store.
- - If O(backend=pass), the default is V(~/.password-store) is used.
- - If O(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml),
- falling back to V(~/.local/share/gopass/stores/root) if C(path) is not defined in the gopass config.
- type: path
- vars:
- - name: passwordstore
- env:
- - name: PASSWORD_STORE_DIR
- create:
- description: Create the password or the subkey if it does not already exist. Takes precedence over O(missing).
- type: bool
- default: false
- overwrite:
- description: Overwrite the password or the subkey if it does already exist.
- type: bool
- default: false
- umask:
- description:
- - Sets the umask for the created V(.gpg) files. The first octed must be greater than 3 (user readable).
- - Note pass' default value is V('077').
- type: string
- env:
- - name: PASSWORD_STORE_UMASK
- version_added: 1.3.0
- returnall:
- description: Return all the content of the password, not only the first line.
- type: bool
- default: false
- subkey:
- description:
- - By default return a specific subkey of the password. When set to V(password), always returns the first line.
- - With O(overwrite=true), it will create the subkey and return it.
- type: str
- default: password
- userpass:
- description: Specify a password to save, instead of a generated one.
- type: str
- length:
- description: The length of the generated password.
- type: integer
- default: 16
- backup:
- description: Used with O(overwrite=true). Backup the previous password or subkey in a subkey.
- type: bool
- default: false
- nosymbols:
- description: Use alphanumeric characters.
- type: bool
- default: false
- missing:
- description:
- - List of preference about what to do if the password file is missing.
- - If O(create=true), the value for this option is ignored and assumed to be V(create).
- - If set to V(error), the lookup will error out if the passname does not exist.
- - If set to V(create), the passname will be created with the provided length O(length) if it does not exist.
- - If set to V(empty) or V(warn), will return a V(none) in case the passname does not exist.
- When using C(lookup) and not C(query), this will be translated to an empty string.
- version_added: 3.1.0
- type: str
- default: error
- choices:
- - error
- - warn
- - empty
- - create
- lock:
- description:
- - How to synchronize operations.
- - The default of V(write) only synchronizes write operations.
- - V(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel.
- - V(none) does not do any synchronization.
- ini:
- - section: passwordstore_lookup
- key: lock
- type: str
- default: write
- choices:
- - readwrite
- - write
- - none
- version_added: 4.5.0
- locktimeout:
- description:
- - Lock timeout applied when O(lock) is not V(none).
- - Time with a unit suffix, V(s), V(m), V(h) for seconds, minutes, and hours, respectively. For example, V(900s) equals V(15m).
- - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details.
- ini:
- - section: passwordstore_lookup
- key: locktimeout
- type: str
- default: 15m
- version_added: 4.5.0
- backend:
- description:
- - Specify which backend to use.
- - Defaults to V(pass), passwordstore.org's original pass utility.
- - V(gopass) support is incomplete.
- ini:
- - section: passwordstore_lookup
- key: backend
- vars:
- - name: passwordstore_backend
- type: str
- default: pass
- choices:
- - pass
- - gopass
- version_added: 5.2.0
- timestamp:
- description: Add the password generation information to the end of the file.
- type: bool
- default: true
- version_added: 8.1.0
- preserve:
- description: Include the old (edited) password inside the pass file.
- type: bool
- default: true
- version_added: 8.1.0
- missing_subkey:
- description:
- - Preference about what to do if the password subkey is missing.
- - If set to V(error), the lookup will error out if the subkey does not exist.
- - If set to V(empty) or V(warn), will return a V(none) in case the subkey does not exist.
- version_added: 8.6.0
- type: str
- default: empty
- choices:
- - error
- - warn
- - empty
- ini:
- - section: passwordstore_lookup
- key: missing_subkey
- notes:
- - The lookup supports passing all options as lookup parameters since community.general 6.0.0.
-'''
-EXAMPLES = """
+ - The directory of the password store.
+ - If O(backend=pass), the default V(~/.password-store) is used.
+ - If O(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml), falling back to V(~/.local/share/gopass/stores/root)
+ if C(path) is not defined in the gopass config.
+ type: path
+ vars:
+ - name: passwordstore
+ env:
+ - name: PASSWORD_STORE_DIR
+ create:
+ description: Create the password or the subkey if it does not already exist. Takes precedence over O(missing).
+ type: bool
+ default: false
+ overwrite:
+ description: Overwrite the password or the subkey if it does already exist.
+ type: bool
+ default: false
+ umask:
+ description:
+ - Sets the umask for the created V(.gpg) files. The first octet must be greater than 3 (user readable).
+ - Note pass' default value is V('077').
+ type: string
+ env:
+ - name: PASSWORD_STORE_UMASK
+ version_added: 1.3.0
+ returnall:
+ description: Return all the content of the password, not only the first line.
+ type: bool
+ default: false
+ subkey:
+ description:
+ - By default return a specific subkey of the password. When set to V(password), always returns the first line.
+ - With O(overwrite=true), it creates the subkey and returns it.
+ type: str
+ default: password
+ userpass:
+ description: Specify a password to save, instead of a generated one.
+ type: str
+ length:
+ description: The length of the generated password.
+ type: integer
+ default: 16
+ backup:
+ description: Used with O(overwrite=true). Backup the previous password or subkey in a subkey.
+ type: bool
+ default: false
+ nosymbols:
+ description: Use alphanumeric characters.
+ type: bool
+ default: false
+ missing:
+ description:
+ - List of preference about what to do if the password file is missing.
+ - If O(create=true), the value for this option is ignored and assumed to be V(create).
+ - If set to V(error), the lookup errors out if the passname does not exist.
+ - If set to V(create), the passname is created with the provided length O(length) if it does not exist.
+ - If set to V(empty) or V(warn), it returns a V(none) in case the passname does not exist. When using C(lookup) and
+ not C(query), this is translated to an empty string.
+ version_added: 3.1.0
+ type: str
+ default: error
+ choices:
+ - error
+ - warn
+ - empty
+ - create
+ lock:
+ description:
+ - How to synchronize operations.
+ - The default of V(write) only synchronizes write operations.
+ - V(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel.
+ - V(none) does not do any synchronization.
+ ini:
+ - section: passwordstore_lookup
+ key: lock
+ type: str
+ default: write
+ choices:
+ - readwrite
+ - write
+ - none
+ version_added: 4.5.0
+ locktimeout:
+ description:
+ - Lock timeout applied when O(lock) is not V(none).
+ - Time with a unit suffix, V(s), V(m), V(h) for seconds, minutes, and hours, respectively. For example, V(900s) equals
+ V(15m).
+ - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details.
+ ini:
+ - section: passwordstore_lookup
+ key: locktimeout
+ type: str
+ default: 15m
+ version_added: 4.5.0
+ backend:
+ description:
+ - Specify which backend to use.
+ - Defaults to V(pass), passwordstore.org's original pass utility.
+ - V(gopass) support is incomplete.
+ ini:
+ - section: passwordstore_lookup
+ key: backend
+ vars:
+ - name: passwordstore_backend
+ type: str
+ default: pass
+ choices:
+ - pass
+ - gopass
+ version_added: 5.2.0
+ timestamp:
+ description: Add the password generation information to the end of the file.
+ type: bool
+ default: true
+ version_added: 8.1.0
+ preserve:
+ description: Include the old (edited) password inside the pass file.
+ type: bool
+ default: true
+ version_added: 8.1.0
+ missing_subkey:
+ description:
+ - Preference about what to do if the password subkey is missing.
+ - If set to V(error), the lookup errors out if the subkey does not exist.
+ - If set to V(empty) or V(warn), it returns a V(none) in case the subkey does not exist.
+ version_added: 8.6.0
+ type: str
+ default: empty
+ choices:
+ - error
+ - warn
+ - empty
+ ini:
+ - section: passwordstore_lookup
+ key: missing_subkey
+notes:
+ - The lookup supports passing all options as lookup parameters since community.general 6.0.0.
+"""
+EXAMPLES = r"""
ansible.cfg: |
[passwordstore_lookup]
lock=readwrite
locktimeout=45s
missing_subkey=warn
-tasks.yml: |
+tasks.yml: |-
---
# Debug is used for examples, BAD IDEA to show passwords on screen
@@ -233,10 +234,10 @@ tasks.yml: |
passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test', returnall=true)}}"
"""
-RETURN = """
+RETURN = r"""
_raw:
description:
- - a password
+ - A password.
type: list
elements: str
"""
diff --git a/plugins/lookup/random_pet.py b/plugins/lookup/random_pet.py
index 77f1c34a51..8f9b3cbd00 100644
--- a/plugins/lookup/random_pet.py
+++ b/plugins/lookup/random_pet.py
@@ -8,38 +8,38 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
- name: random_pet
- author:
- - Abhijeet Kasurde (@Akasurde)
- short_description: Generates random pet names
- version_added: '3.1.0'
- requirements:
- - petname U(https://github.com/dustinkirkland/python-petname)
+DOCUMENTATION = r"""
+name: random_pet
+author:
+ - Abhijeet Kasurde (@Akasurde)
+short_description: Generates random pet names
+version_added: '3.1.0'
+requirements:
+ - petname U(https://github.com/dustinkirkland/python-petname)
+description:
+ - Generates random pet names that can be used as unique identifiers for the resources.
+options:
+ words:
description:
- - Generates random pet names that can be used as unique identifiers for the resources.
- options:
- words:
- description:
- - The number of words in the pet name.
- default: 2
- type: int
- length:
- description:
- - The maximal length of every component of the pet name.
- - Values below 3 will be set to 3 by petname.
- default: 6
- type: int
- prefix:
- description: A string to prefix with the name.
- type: str
- separator:
- description: The character to separate words in the pet name.
- default: "-"
- type: str
-'''
+ - The number of words in the pet name.
+ default: 2
+ type: int
+ length:
+ description:
+ - The maximal length of every component of the pet name.
+ - Values below V(3) are set to V(3) by petname.
+ default: 6
+ type: int
+ prefix:
+ description: A string to prefix with the name.
+ type: str
+ separator:
+ description: The character to separate words in the pet name.
+ default: "-"
+ type: str
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
- name: Generate pet name
ansible.builtin.debug:
var: lookup('community.general.random_pet')
@@ -59,14 +59,14 @@ EXAMPLES = r'''
ansible.builtin.debug:
var: lookup('community.general.random_pet', length=7)
# Example result: 'natural-peacock'
-'''
+"""
-RETURN = r'''
- _raw:
- description: A one-element list containing a random pet name
- type: list
- elements: str
-'''
+RETURN = r"""
+_raw:
+ description: A one-element list containing a random pet name.
+ type: list
+ elements: str
+"""
try:
import petname
diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py
index 9b811dd8b3..4b227d3dca 100644
--- a/plugins/lookup/random_string.py
+++ b/plugins/lookup/random_string.py
@@ -9,95 +9,94 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
- name: random_string
- author:
- - Abhijeet Kasurde (@Akasurde)
- short_description: Generates random string
- version_added: '3.2.0'
+name: random_string
+author:
+ - Abhijeet Kasurde (@Akasurde)
+short_description: Generates random string
+version_added: '3.2.0'
+description:
+ - Generates random string based upon the given constraints.
+ - Uses L(random.SystemRandom,https://docs.python.org/3/library/random.html#random.SystemRandom), so should be strong enough
+ for cryptographic purposes.
+options:
+ length:
+ description: The length of the string.
+ default: 8
+ type: int
+ upper:
description:
- - Generates random string based upon the given constraints.
- - Uses L(random.SystemRandom,https://docs.python.org/3/library/random.html#random.SystemRandom),
- so should be strong enough for cryptographic purposes.
- options:
- length:
- description: The length of the string.
- default: 8
- type: int
- upper:
- description:
- - Include uppercase letters in the string.
- default: true
- type: bool
- lower:
- description:
- - Include lowercase letters in the string.
- default: true
- type: bool
- numbers:
- description:
- - Include numbers in the string.
- default: true
- type: bool
- special:
- description:
- - Include special characters in the string.
- - Special characters are taken from Python standard library C(string).
- See L(the documentation of string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation)
- for which characters will be used.
- - The choice of special characters can be changed to setting O(override_special).
- default: true
- type: bool
- min_numeric:
- description:
- - Minimum number of numeric characters in the string.
- - If set, overrides O(numbers=false).
- default: 0
- type: int
- min_upper:
- description:
- - Minimum number of uppercase alphabets in the string.
- - If set, overrides O(upper=false).
- default: 0
- type: int
- min_lower:
- description:
- - Minimum number of lowercase alphabets in the string.
- - If set, overrides O(lower=false).
- default: 0
- type: int
- min_special:
- description:
- - Minimum number of special character in the string.
- default: 0
- type: int
- override_special:
- description:
- - Override a list of special characters to use in the string.
- - If set O(min_special) should be set to a non-default value.
- type: str
- override_all:
- description:
- - Override all values of O(numbers), O(upper), O(lower), and O(special) with
- the given list of characters.
- type: str
- ignore_similar_chars:
- description:
- - Ignore similar characters, such as V(l) and V(1), or V(O) and V(0).
- - These characters can be configured in O(similar_chars).
- default: false
- type: bool
- version_added: 7.5.0
- similar_chars:
- description:
- - Override a list of characters not to be use in the string.
- default: "il1LoO0"
- type: str
- version_added: 7.5.0
- base64:
- description:
- - Returns base64 encoded string.
- type: bool
- default: false
+ - Include uppercase letters in the string.
+ default: true
+ type: bool
+ lower:
+ description:
+ - Include lowercase letters in the string.
+ default: true
+ type: bool
+ numbers:
+ description:
+ - Include numbers in the string.
+ default: true
+ type: bool
+ special:
+ description:
+ - Include special characters in the string.
+ - Special characters are taken from Python standard library C(string). See L(the documentation of
+ string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation)
+ for which characters are used.
+ - The choice of special characters can be changed to setting O(override_special).
+ default: true
+ type: bool
+ min_numeric:
+ description:
+ - Minimum number of numeric characters in the string.
+ - If set, overrides O(numbers=false).
+ default: 0
+ type: int
+ min_upper:
+ description:
+ - Minimum number of uppercase alphabets in the string.
+ - If set, overrides O(upper=false).
+ default: 0
+ type: int
+ min_lower:
+ description:
+ - Minimum number of lowercase alphabets in the string.
+ - If set, overrides O(lower=false).
+ default: 0
+ type: int
+ min_special:
+ description:
+ - Minimum number of special characters in the string.
+ default: 0
+ type: int
+ override_special:
+ description:
+ - Override a list of special characters to use in the string.
+ - If set, O(min_special) should be set to a non-default value.
+ type: str
+ override_all:
+ description:
+ - Override all values of O(numbers), O(upper), O(lower), and O(special) with the given list of characters.
+ type: str
+ ignore_similar_chars:
+ description:
+ - Ignore similar characters, such as V(l) and V(1), or V(O) and V(0).
+ - These characters can be configured in O(similar_chars).
+ default: false
+ type: bool
+ version_added: 7.5.0
+ similar_chars:
+ description:
+ - Override a list of characters not to be used in the string.
+ default: "il1LoO0"
+ type: str
+ version_added: 7.5.0
+ base64:
+ description:
+ - Returns base64 encoded string.
+ type: bool
+ default: false
"""
EXAMPLES = r"""
@@ -142,10 +141,10 @@ EXAMPLES = r"""
"""
RETURN = r"""
- _raw:
- description: A one-element list containing a random string
- type: list
- elements: str
+_raw:
+ description: A one-element list containing a random string.
+ type: list
+ elements: str
"""
import base64
diff --git a/plugins/lookup/random_words.py b/plugins/lookup/random_words.py
index a4aa1b3178..247871dba0 100644
--- a/plugins/lookup/random_words.py
+++ b/plugins/lookup/random_words.py
@@ -10,44 +10,43 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
- name: random_words
- author:
- - Thomas Sjögren (@konstruktoid)
- short_description: Return a number of random words
- version_added: "4.0.0"
- requirements:
- - xkcdpass U(https://github.com/redacted/XKCD-password-generator)
+name: random_words
+author:
+ - Thomas Sjögren (@konstruktoid)
+short_description: Return a number of random words
+version_added: "4.0.0"
+requirements:
+ - xkcdpass U(https://github.com/redacted/XKCD-password-generator)
+description:
+ - Returns a number of random words. The output can for example be used for passwords.
+ - See U(https://xkcd.com/936/) for background.
+options:
+ numwords:
description:
- - Returns a number of random words. The output can for example be used for
- passwords.
- - See U(https://xkcd.com/936/) for background.
- options:
- numwords:
- description:
- - The number of words.
- default: 6
- type: int
- min_length:
- description:
- - Minimum length of words to make password.
- default: 5
- type: int
- max_length:
- description:
- - Maximum length of words to make password.
- default: 9
- type: int
- delimiter:
- description:
- - The delimiter character between words.
- default: " "
- type: str
- case:
- description:
- - The method for setting the case of each word in the passphrase.
- choices: ["alternating", "upper", "lower", "random", "capitalize"]
- default: "lower"
- type: str
+ - The number of words.
+ default: 6
+ type: int
+ min_length:
+ description:
+ - Minimum length of words to make password.
+ default: 5
+ type: int
+ max_length:
+ description:
+ - Maximum length of words to make password.
+ default: 9
+ type: int
+ delimiter:
+ description:
+ - The delimiter character between words.
+ default: " "
+ type: str
+ case:
+ description:
+ - The method for setting the case of each word in the passphrase.
+ choices: ["alternating", "upper", "lower", "random", "capitalize"]
+ default: "lower"
+ type: str
"""
EXAMPLES = r"""
@@ -74,10 +73,10 @@ EXAMPLES = r"""
"""
RETURN = r"""
- _raw:
- description: A single-element list containing random words.
- type: list
- elements: str
+_raw:
+ description: A single-element list containing random words.
+ type: list
+ elements: str
"""
from ansible.errors import AnsibleLookupError
diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py
index 5c669a7f23..bb5a122da3 100644
--- a/plugins/lookup/redis.py
+++ b/plugins/lookup/redis.py
@@ -6,50 +6,50 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: redis
- author:
- - Jan-Piet Mens (@jpmens)
- - Ansible Core Team
- short_description: fetch data from Redis
- description:
- - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it
- requirements:
- - redis (python library https://github.com/andymccurdy/redis-py/)
- options:
- _terms:
- description: list of keys to query
- type: list
- elements: string
- host:
- description: location of Redis host
- type: string
- default: '127.0.0.1'
- env:
- - name: ANSIBLE_REDIS_HOST
- ini:
- - section: lookup_redis
- key: host
- port:
- description: port on which Redis is listening on
- default: 6379
- type: int
- env:
- - name: ANSIBLE_REDIS_PORT
- ini:
- - section: lookup_redis
- key: port
- socket:
- description: path to socket on which to query Redis, this option overrides host and port options when set.
- type: path
- env:
- - name: ANSIBLE_REDIS_SOCKET
- ini:
- - section: lookup_redis
- key: socket
-'''
+DOCUMENTATION = r"""
+name: redis
+author:
+ - Jan-Piet Mens (@jpmens)
+ - Ansible Core Team
+short_description: Fetch data from Redis
+description:
+ - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it.
+requirements:
+ - redis (python library https://github.com/andymccurdy/redis-py/)
+options:
+ _terms:
+ description: List of keys to query.
+ type: list
+ elements: string
+ host:
+ description: Location of Redis host.
+ type: string
+ default: '127.0.0.1'
+ env:
+ - name: ANSIBLE_REDIS_HOST
+ ini:
+ - section: lookup_redis
+ key: host
+ port:
+ description: Port on which Redis is listening on.
+ default: 6379
+ type: int
+ env:
+ - name: ANSIBLE_REDIS_PORT
+ ini:
+ - section: lookup_redis
+ key: port
+ socket:
+ description: Path to socket on which to query Redis, this option overrides host and port options when set.
+ type: path
+ env:
+ - name: ANSIBLE_REDIS_SOCKET
+ ini:
+ - section: lookup_redis
+ key: socket
+"""
-EXAMPLES = """
+EXAMPLES = r"""
- name: query redis for somekey (default or configured settings used)
ansible.builtin.debug:
msg: "{{ lookup('community.general.redis', 'somekey') }}"
@@ -66,12 +66,11 @@ EXAMPLES = """
- name: use list directly with a socket
ansible.builtin.debug:
msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
-
"""
-RETURN = """
+RETURN = r"""
_raw:
- description: value(s) stored in Redis
+ description: Value(s) stored in Redis.
type: list
elements: str
"""
diff --git a/plugins/lookup/revbitspss.py b/plugins/lookup/revbitspss.py
index 89c19cf23c..6b31963f4a 100644
--- a/plugins/lookup/revbitspss.py
+++ b/plugins/lookup/revbitspss.py
@@ -12,54 +12,55 @@ author: RevBits (@RevBits)
short_description: Get secrets from RevBits PAM server
version_added: 4.1.0
description:
- - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM
- Server using API key authentication with the REST API.
+ - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM Server using API key authentication with the REST
+ API.
requirements:
- - revbits_ansible - U(https://pypi.org/project/revbits_ansible/)
+ - revbits_ansible - U(https://pypi.org/project/revbits_ansible/)
options:
- _terms:
- description:
- - This will be an array of keys for secrets which you want to fetch from RevBits PAM.
- required: true
- type: list
- elements: string
- base_url:
- description:
- - This will be the base URL of the server, for example V(https://server-url-here).
- required: true
- type: string
- api_key:
- description:
- - This will be the API key for authentication. You can get it from the RevBits PAM secret manager module.
- required: true
- type: string
+ _terms:
+ description:
+ - This is an array of keys for secrets which you want to fetch from RevBits PAM.
+ required: true
+ type: list
+ elements: string
+ base_url:
+ description:
+ - This is the base URL of the server, for example V(https://server-url-here).
+ required: true
+ type: string
+ api_key:
+ description:
+ - This is the API key for authentication. You can get it from the RevBits PAM secret manager module.
+ required: true
+ type: string
"""
RETURN = r"""
_list:
- description:
- - The JSON responses which you can access with defined keys.
- - If you are fetching secrets named as UUID, PASSWORD it will gives you the dict of all secrets.
- type: list
- elements: dict
+ description:
+ - The JSON responses which you can access with defined keys.
+ - If you are fetching secrets named as UUID, PASSWORD it returns the dict of all secrets.
+ type: list
+ elements: dict
"""
EXAMPLES = r"""
+---
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.revbitspss',
- 'UUIDPAM', 'DB_PASS',
- base_url='https://server-url-here',
- api_key='API_KEY_GOES_HERE'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.revbitspss',
+ 'UUIDPAM', 'DB_PASS',
+ base_url='https://server-url-here',
+ api_key='API_KEY_GOES_HERE'
+ )
+ }}
tasks:
- - ansible.builtin.debug:
- msg: >
- UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }}
+ - ansible.builtin.debug:
+ msg: >-
+ UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }}
"""
from ansible.plugins.lookup import LookupBase
diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py
index 4d965372fb..f4142f67c6 100644
--- a/plugins/lookup/shelvefile.py
+++ b/plugins/lookup/shelvefile.py
@@ -6,34 +6,35 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = '''
- name: shelvefile
- author: Alejandro Guirao (!UNKNOWN)
- short_description: read keys from Python shelve file
- description:
- - Read keys from Python shelve file.
- options:
- _terms:
- description: Sets of key value pairs of parameters.
- type: list
- elements: str
- key:
- description: Key to query.
- type: str
- required: true
- file:
- description: Path to shelve file.
- type: path
- required: true
-'''
+DOCUMENTATION = r"""
+name: shelvefile
+author: Alejandro Guirao (!UNKNOWN)
+short_description: Read keys from Python shelve file
+description:
+ - Read keys from Python shelve file.
+options:
+ _terms:
+ description: Sets of key value pairs of parameters.
+ type: list
+ elements: str
+ key:
+ description: Key to query.
+ type: str
+ required: true
+ file:
+ description: Path to shelve file.
+ type: path
+ required: true
+"""
-EXAMPLES = """
+EXAMPLES = r"""
+---
- name: Retrieve a string value corresponding to a key inside a Python shelve file
ansible.builtin.debug:
msg: "{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}"
"""
-RETURN = """
+RETURN = r"""
_list:
description: Value(s) of key(s) in shelve file(s).
type: list
diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py
index ffae6bb824..3d73fcbe99 100644
--- a/plugins/lookup/tss.py
+++ b/plugins/lookup/tss.py
@@ -12,200 +12,196 @@ author: Adam Migus (@amigus)
short_description: Get secrets from Thycotic Secret Server
version_added: 1.0.0
description:
- - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret
- Server using token authentication with O(username) and O(password) on
- the REST API at O(base_url).
- - When using self-signed certificates the environment variable
- E(REQUESTS_CA_BUNDLE) can be set to a file containing the trusted certificates
- (in C(.pem) format).
- - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt').
+ - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret Server using token authentication with O(username)
+ and O(password) on the REST API at O(base_url).
+ - When using self-signed certificates the environment variable E(REQUESTS_CA_BUNDLE) can be set to a file containing the
+ trusted certificates (in C(.pem) format).
+ - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt').
requirements:
- - python-tss-sdk - https://pypi.org/project/python-tss-sdk/
+ - python-tss-sdk - https://pypi.org/project/python-tss-sdk/
options:
- _terms:
- description: The integer ID of the secret.
- required: true
- type: list
- elements: int
- secret_path:
- description: Indicate a full path of secret including folder and secret name when the secret ID is set to 0.
- required: false
- type: str
- version_added: 7.2.0
- fetch_secret_ids_from_folder:
- description:
- - Boolean flag which indicates whether secret ids are in a folder is fetched by folder ID or not.
- - V(true) then the terms will be considered as a folder IDs. Otherwise (default), they are considered as secret IDs.
- required: false
- type: bool
- version_added: 7.1.0
- fetch_attachments:
- description:
- - Boolean flag which indicates whether attached files will get downloaded or not.
- - The download will only happen if O(file_download_path) has been provided.
- required: false
- type: bool
- version_added: 7.0.0
- file_download_path:
- description: Indicate the file attachment download location.
- required: false
- type: path
- version_added: 7.0.0
- base_url:
- description: The base URL of the server, for example V(https://localhost/SecretServer).
- type: string
- env:
- - name: TSS_BASE_URL
- ini:
- - section: tss_lookup
- key: base_url
- required: true
- username:
- description: The username with which to request the OAuth2 Access Grant.
- type: string
- env:
- - name: TSS_USERNAME
- ini:
- - section: tss_lookup
- key: username
- password:
- description:
- - The password associated with the supplied username.
- - Required when O(token) is not provided.
- type: string
- env:
- - name: TSS_PASSWORD
- ini:
- - section: tss_lookup
- key: password
- domain:
- default: ""
- description:
- - The domain with which to request the OAuth2 Access Grant.
- - Optional when O(token) is not provided.
- - Requires C(python-tss-sdk) version 1.0.0 or greater.
- type: string
- env:
- - name: TSS_DOMAIN
- ini:
- - section: tss_lookup
- key: domain
- required: false
- version_added: 3.6.0
- token:
- description:
- - Existing token for Thycotic authorizer.
- - If provided, O(username) and O(password) are not needed.
- - Requires C(python-tss-sdk) version 1.0.0 or greater.
- type: string
- env:
- - name: TSS_TOKEN
- ini:
- - section: tss_lookup
- key: token
- version_added: 3.7.0
- api_path_uri:
- default: /api/v1
- description: The path to append to the base URL to form a valid REST
- API request.
- type: string
- env:
- - name: TSS_API_PATH_URI
- required: false
- token_path_uri:
- default: /oauth2/token
- description: The path to append to the base URL to form a valid OAuth2
- Access Grant request.
- type: string
- env:
- - name: TSS_TOKEN_PATH_URI
- required: false
+ _terms:
+ description: The integer ID of the secret.
+ required: true
+ type: list
+ elements: int
+ secret_path:
+ description: Indicate a full path of secret including folder and secret name when the secret ID is set to 0.
+ required: false
+ type: str
+ version_added: 7.2.0
+ fetch_secret_ids_from_folder:
+ description:
+ - Boolean flag which indicates whether secret IDs in a folder are fetched by folder ID or not.
+ - If V(true), the terms are considered as folder IDs. Otherwise (default), they are considered as secret IDs.
+ required: false
+ type: bool
+ version_added: 7.1.0
+ fetch_attachments:
+ description:
+ - Boolean flag which indicates whether attached files are downloaded or not.
+ - The download only happens if O(file_download_path) has been provided.
+ required: false
+ type: bool
+ version_added: 7.0.0
+ file_download_path:
+ description: Indicate the file attachment download location.
+ required: false
+ type: path
+ version_added: 7.0.0
+ base_url:
+ description: The base URL of the server, for example V(https://localhost/SecretServer).
+ type: string
+ env:
+ - name: TSS_BASE_URL
+ ini:
+ - section: tss_lookup
+ key: base_url
+ required: true
+ username:
+ description: The username with which to request the OAuth2 Access Grant.
+ type: string
+ env:
+ - name: TSS_USERNAME
+ ini:
+ - section: tss_lookup
+ key: username
+ password:
+ description:
+ - The password associated with the supplied username.
+ - Required when O(token) is not provided.
+ type: string
+ env:
+ - name: TSS_PASSWORD
+ ini:
+ - section: tss_lookup
+ key: password
+ domain:
+ default: ""
+ description:
+ - The domain with which to request the OAuth2 Access Grant.
+ - Optional when O(token) is not provided.
+ - Requires C(python-tss-sdk) version 1.0.0 or greater.
+ type: string
+ env:
+ - name: TSS_DOMAIN
+ ini:
+ - section: tss_lookup
+ key: domain
+ required: false
+ version_added: 3.6.0
+ token:
+ description:
+ - Existing token for Thycotic authorizer.
+ - If provided, O(username) and O(password) are not needed.
+ - Requires C(python-tss-sdk) version 1.0.0 or greater.
+ type: string
+ env:
+ - name: TSS_TOKEN
+ ini:
+ - section: tss_lookup
+ key: token
+ version_added: 3.7.0
+ api_path_uri:
+ default: /api/v1
+ description: The path to append to the base URL to form a valid REST API request.
+ type: string
+ env:
+ - name: TSS_API_PATH_URI
+ required: false
+ token_path_uri:
+ default: /oauth2/token
+ description: The path to append to the base URL to form a valid OAuth2 Access Grant request.
+ type: string
+ env:
+ - name: TSS_TOKEN_PATH_URI
+ required: false
"""
RETURN = r"""
_list:
- description:
- - The JSON responses to C(GET /secrets/{id}).
- - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get).
- type: list
- elements: dict
+ description:
+ - The JSON responses to C(GET /secrets/{id}).
+ - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get).
+ type: list
+ elements: dict
"""
EXAMPLES = r"""
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 102,
- base_url='https://secretserver.domain.com/SecretServer/',
- username='user.name',
- password='password'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ username='user.name',
+ password='password'
+ )
+ }}
tasks:
- - ansible.builtin.debug:
- msg: >
- the password is {{
- (secret['items']
- | items2dict(key_name='slug',
- value_name='itemValue'))['password']
- }}
+ - ansible.builtin.debug:
+ msg: >
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 102,
- base_url='https://secretserver.domain.com/SecretServer/',
- username='user.name',
- password='password',
- domain='domain'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ username='user.name',
+ password='password',
+ domain='domain'
+ )
+ }}
tasks:
- - ansible.builtin.debug:
- msg: >
- the password is {{
- (secret['items']
- | items2dict(key_name='slug',
- value_name='itemValue'))['password']
- }}
+ - ansible.builtin.debug:
+ msg: >
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
- hosts: localhost
vars:
- secret_password: >-
- {{
- ((lookup(
- 'community.general.tss',
- 102,
- base_url='https://secretserver.domain.com/SecretServer/',
- token='thycotic_access_token',
- ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password']
- }}
+ secret_password: >-
+ {{
+ ((lookup(
+ 'community.general.tss',
+ 102,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ token='thycotic_access_token',
+ ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password']
+ }}
tasks:
- - ansible.builtin.debug:
- msg: the password is {{ secret_password }}
+ - ansible.builtin.debug:
+ msg: the password is {{ secret_password }}
# Private key stores into certificate file which is attached with secret.
# If fetch_attachments=True then private key file will be download on specified path
# and file content will display in debug message.
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 102,
- fetch_attachments=True,
- file_download_path='/home/certs',
- base_url='https://secretserver.domain.com/SecretServer/',
- token='thycotic_access_token'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ fetch_attachments=True,
+ file_download_path='/home/certs',
+ base_url='https://secretserver.domain.com/SecretServer/',
+ token='thycotic_access_token'
+ )
+ }}
tasks:
- ansible.builtin.debug:
msg: >
@@ -218,16 +214,16 @@ EXAMPLES = r"""
# If fetch_secret_ids_from_folder=true then secret IDs are in a folder is fetched based on folder ID
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 102,
- fetch_secret_ids_from_folder=true,
- base_url='https://secretserver.domain.com/SecretServer/',
- token='thycotic_access_token'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ fetch_secret_ids_from_folder=true,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ token='thycotic_access_token'
+ )
+ }}
tasks:
- ansible.builtin.debug:
msg: >
@@ -238,25 +234,25 @@ EXAMPLES = r"""
# If secret ID is 0 and secret_path has value then secret is fetched by secret path
- hosts: localhost
vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 0,
- secret_path='\folderName\secretName'
- base_url='https://secretserver.domain.com/SecretServer/',
- username='user.name',
- password='password'
- )
- }}
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 0,
+ secret_path='\folderName\secretName',
+ base_url='https://secretserver.domain.com/SecretServer/',
+ username='user.name',
+ password='password'
+ )
+ }}
tasks:
- - ansible.builtin.debug:
- msg: >
- the password is {{
- (secret['items']
- | items2dict(key_name='slug',
- value_name='itemValue'))['password']
- }}
+ - ansible.builtin.debug:
+ msg: >-
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
"""
import abc
diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py
index 8314ed945e..4f5293c09f 100644
--- a/plugins/module_utils/django.py
+++ b/plugins/module_utils/django.py
@@ -67,11 +67,9 @@ class _DjangoRunner(PythonRunner):
class DjangoModuleHelper(ModuleHelper):
module = {}
- use_old_vardict = False
django_admin_cmd = None
arg_formats = {}
django_admin_arg_order = ()
- use_old_vardict = False
_django_args = []
_check_mode_arg = ""
diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py
index 648cad76bc..e053eca305 100644
--- a/plugins/module_utils/identity/keycloak/keycloak.py
+++ b/plugins/module_utils/identity/keycloak/keycloak.py
@@ -248,6 +248,29 @@ def _request_token_using_refresh_token(module_params):
return _token_request(module_params, payload)
+def _request_token_using_client_credentials(module_params):
+ """ Obtains connection header with token for the authentication,
+ using the provided auth_client_id and auth_client_secret by grant_type
+ client_credentials. Ensure that the used client uses client authorization
+ with service account roles enabled and required service roles assigned.
+ :param module_params: parameters of the module. Must include 'auth_client_id'
+ and 'auth_client_secret'.
+ :return: connection header
+ """
+ client_id = module_params.get('auth_client_id')
+ client_secret = module_params.get('auth_client_secret')
+
+ temp_payload = {
+ 'grant_type': 'client_credentials',
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ }
+ # Remove empty items, for instance missing client_secret
+ payload = {k: v for k, v in temp_payload.items() if v is not None}
+
+ return _token_request(module_params, payload)
+
+
def get_token(module_params):
""" Obtains connection header with token for the authentication,
token already given or obtained from credentials
@@ -257,7 +280,13 @@ def get_token(module_params):
token = module_params.get('token')
if token is None:
- token = _request_token_using_credentials(module_params)
+ auth_client_id = module_params.get('auth_client_id')
+ auth_client_secret = module_params.get('auth_client_secret')
+ auth_username = module_params.get('auth_username')
+ if auth_client_id is not None and auth_client_secret is not None and auth_username is None:
+ token = _request_token_using_client_credentials(module_params)
+ else:
+ token = _request_token_using_credentials(module_params)
return {
'Authorization': 'Bearer ' + token,
@@ -387,6 +416,21 @@ class KeycloakAPI(object):
r = make_request_catching_401()
+ if isinstance(r, Exception):
+ # Try to re-auth with client_id and client_secret, if available
+ auth_client_id = self.module.params.get('auth_client_id')
+ auth_client_secret = self.module.params.get('auth_client_secret')
+ if auth_client_id is not None and auth_client_secret is not None:
+ try:
+ token = _request_token_using_client_credentials(self.module.params)
+ self.restheaders['Authorization'] = 'Bearer ' + token
+
+ r = make_request_catching_401()
+ except KeycloakError as e:
+ # Token refresh returns 400 if token is expired/invalid, so continue on if we get a 400
+ if e.authError is not None and e.authError.code != 400:
+ raise e
+
if isinstance(r, Exception):
# Either no re-auth options were available, or they all failed
raise r
@@ -1551,7 +1595,7 @@ class KeycloakAPI(object):
if parent['subGroupCount'] == 0:
group_children = []
else:
- group_children_url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id'])
+ group_children_url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id']) + "?max=" + str(parent['subGroupCount'])
group_children = self._request_and_deserialize(group_children_url, method="GET")
subgroups = group_children
else:
@@ -1917,7 +1961,7 @@ class KeycloakAPI(object):
and composite["name"] == existing_composite["name"]):
composite_found = True
break
- if (not composite_found and ('state' not in composite or composite['state'] == 'present')):
+ if not composite_found and ('state' not in composite or composite['state'] == 'present'):
if "client_id" in composite and composite['client_id'] is not None:
client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm)
for client_role in client_roles:
diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py
deleted file mode 100644
index dd879ff4b2..0000000000
--- a/plugins/module_utils/mh/mixins/deps.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2020, Alexei Znamensky
-# Copyright (c) 2020, Ansible Project
-# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
-# SPDX-License-Identifier: BSD-2-Clause
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-class DependencyCtxMgr(object):
- """
- DEPRECATION WARNING
-
- This class is deprecated and will be removed in community.general 11.0.0
- Modules should use plugins/module_utils/deps.py instead.
- """
- def __init__(self, name, msg=None):
- self.name = name
- self.msg = msg
- self.has_it = False
- self.exc_type = None
- self.exc_val = None
- self.exc_tb = None
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.has_it = exc_type is None
- self.exc_type = exc_type
- self.exc_val = exc_val
- self.exc_tb = exc_tb
- return not self.has_it
-
- @property
- def text(self):
- return self.msg or str(self.exc_val)
diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py
deleted file mode 100644
index 7db9904f93..0000000000
--- a/plugins/module_utils/mh/mixins/vars.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2020, Alexei Znamensky
-# Copyright (c) 2020, Ansible Project
-# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
-# SPDX-License-Identifier: BSD-2-Clause
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import copy
-
-
-class VarMeta(object):
- """
- DEPRECATION WARNING
-
- This class is deprecated and will be removed in community.general 11.0.0
- Modules should use the VarDict from plugins/module_utils/vardict.py instead.
- """
-
- NOTHING = object()
-
- def __init__(self, diff=False, output=True, change=None, fact=False):
- self.init = False
- self.initial_value = None
- self.value = None
-
- self.diff = diff
- self.change = diff if change is None else change
- self.output = output
- self.fact = fact
-
- def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING):
- if diff is not None:
- self.diff = diff
- if output is not None:
- self.output = output
- if change is not None:
- self.change = change
- if fact is not None:
- self.fact = fact
- if initial_value is not self.NOTHING:
- self.initial_value = copy.deepcopy(initial_value)
-
- def set_value(self, value):
- if not self.init:
- self.initial_value = copy.deepcopy(value)
- self.init = True
- self.value = value
- return self
-
- @property
- def has_changed(self):
- return self.change and (self.initial_value != self.value)
-
- @property
- def diff_result(self):
- return None if not (self.diff and self.has_changed) else {
- 'before': self.initial_value,
- 'after': self.value,
- }
-
- def __str__(self):
- return "".format(
- self.value, self.initial_value, self.diff, self.output, self.change
- )
-
-
-class VarDict(object):
- """
- DEPRECATION WARNING
-
- This class is deprecated and will be removed in community.general 11.0.0
- Modules should use the VarDict from plugins/module_utils/vardict.py instead.
- """
- def __init__(self):
- self._data = dict()
- self._meta = dict()
-
- def __getitem__(self, item):
- return self._data[item]
-
- def __setitem__(self, key, value):
- self.set(key, value)
-
- def __getattr__(self, item):
- try:
- return self._data[item]
- except KeyError:
- return getattr(self._data, item)
-
- def __setattr__(self, key, value):
- if key in ('_data', '_meta'):
- super(VarDict, self).__setattr__(key, value)
- else:
- self.set(key, value)
-
- def meta(self, name):
- return self._meta[name]
-
- def set_meta(self, name, **kwargs):
- self.meta(name).set(**kwargs)
-
- def set(self, name, value, **kwargs):
- if name in ('_data', '_meta'):
- raise ValueError("Names _data and _meta are reserved for use by ModuleHelper")
- self._data[name] = value
- if name in self._meta:
- meta = self.meta(name)
- else:
- meta = VarMeta(**kwargs)
- meta.set_value(value)
- self._meta[name] = meta
-
- def output(self):
- return {k: v for k, v in self._data.items() if self.meta(k).output}
-
- def diff(self):
- diff_results = [(k, self.meta(k).diff_result) for k in self._data]
- diff_results = [dr for dr in diff_results if dr[1] is not None]
- if diff_results:
- before = dict((dr[0], dr[1]['before']) for dr in diff_results)
- after = dict((dr[0], dr[1]['after']) for dr in diff_results)
- return {'before': before, 'after': after}
- return None
-
- def facts(self):
- facts_result = {k: v for k, v in self._data.items() if self._meta[k].fact}
- return facts_result if facts_result else None
-
- def change_vars(self):
- return [v for v in self._data if self.meta(v).change]
-
- def has_changed(self, v):
- return self._meta[v].has_changed
-
-
-class VarsMixin(object):
- """
- DEPRECATION WARNING
-
- This class is deprecated and will be removed in community.general 11.0.0
- Modules should use the VarDict from plugins/module_utils/vardict.py instead.
- """
- def __init__(self, module=None):
- self.vars = VarDict()
- super(VarsMixin, self).__init__(module)
-
- def update_vars(self, meta=None, **kwargs):
- if meta is None:
- meta = {}
- for k, v in kwargs.items():
- self.vars.set(k, v, **meta)
diff --git a/plugins/module_utils/mh/module_helper.py b/plugins/module_utils/mh/module_helper.py
index ca95199d9b..f0e2ad6e96 100644
--- a/plugins/module_utils/mh/module_helper.py
+++ b/plugins/module_utils/mh/module_helper.py
@@ -10,13 +10,9 @@ __metaclass__ = type
from ansible.module_utils.common.dict_transformations import dict_merge
-from ansible_collections.community.general.plugins.module_utils.vardict import VarDict as _NewVarDict # remove "as NewVarDict" in 11.0.0
-# (TODO: remove AnsibleModule!) pylint: disable-next=unused-import
-from ansible_collections.community.general.plugins.module_utils.mh.base import AnsibleModule # noqa: F401 DEPRECATED, remove in 11.0.0
+from ansible_collections.community.general.plugins.module_utils.vardict import VarDict
from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase
from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
-# (TODO: remove mh.mixins.vars!) pylint: disable-next=unused-import
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _OldVarDict # noqa: F401 remove in 11.0.0
from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin
@@ -26,24 +22,11 @@ class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase):
diff_params = ()
change_params = ()
facts_params = ()
- use_old_vardict = True # remove in 11.0.0
- mute_vardict_deprecation = False
def __init__(self, module=None):
- if self.use_old_vardict: # remove first half of the if in 11.0.0
- self.vars = _OldVarDict()
- super(ModuleHelper, self).__init__(module)
- if not self.mute_vardict_deprecation:
- self.module.deprecate(
- "This class is using the old VarDict from ModuleHelper, which is deprecated. "
- "Set the class variable use_old_vardict to False and make the necessary adjustments."
- "The old VarDict class will be removed in community.general 11.0.0",
- version="11.0.0", collection_name="community.general"
- )
- else:
- self.vars = _NewVarDict()
- super(ModuleHelper, self).__init__(module)
+ super(ModuleHelper, self).__init__(module)
+ self.vars = VarDict()
for name, value in self.module.params.items():
self.vars.set(
name, value,
@@ -66,9 +49,6 @@ class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase):
self.update_vars(meta={"fact": True}, **kwargs)
def _vars_changed(self):
- if self.use_old_vardict:
- return any(self.vars.has_changed(v) for v in self.vars.change_vars())
-
return self.vars.has_changed
def has_changed(self):
diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py
index 366699329a..f70ae3515d 100644
--- a/plugins/module_utils/module_helper.py
+++ b/plugins/module_utils/module_helper.py
@@ -11,12 +11,8 @@ __metaclass__ = type
from ansible_collections.community.general.plugins.module_utils.mh.module_helper import (
ModuleHelper, StateModuleHelper,
- AnsibleModule # remove in 11.0.0
)
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin # noqa: F401 remove in 11.0.0
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr # noqa: F401 remove in 11.0.0
from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401
from ansible_collections.community.general.plugins.module_utils.mh.deco import (
cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns,
)
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict, VarsMixin # noqa: F401 remove in 11.0.0
diff --git a/plugins/module_utils/pacemaker.py b/plugins/module_utils/pacemaker.py
index 9f1456e75c..f0f54cce9d 100644
--- a/plugins/module_utils/pacemaker.py
+++ b/plugins/module_utils/pacemaker.py
@@ -14,7 +14,12 @@ _state_map = {
"absent": "remove",
"status": "status",
"enabled": "enable",
- "disabled": "disable"
+ "disabled": "disable",
+ "online": "start",
+ "offline": "stop",
+ "maintenance": "set",
+ "config": "config",
+ "cleanup": "cleanup",
}
@@ -37,11 +42,20 @@ def fmt_resource_argument(value):
return ['--group' if value['argument_action'] == 'group' else value['argument_action']] + value['argument_option']
-def pacemaker_runner(module, cli_action, **kwargs):
+def get_pacemaker_maintenance_mode(runner):
+ with runner("cli_action config") as ctx:
+ rc, out, err = ctx.run(cli_action="property")
+ maintenance_mode_output = list(filter(lambda string: "maintenance-mode=true" in string.lower(), out.splitlines()))
+ return bool(maintenance_mode_output)
+
+
+def pacemaker_runner(module, **kwargs):
+ runner_command = ['pcs']
runner = CmdRunner(
module,
- command=['pcs', cli_action],
+ command=runner_command,
arg_formats=dict(
+ cli_action=cmd_runner_fmt.as_list(),
state=cmd_runner_fmt.as_map(_state_map),
name=cmd_runner_fmt.as_list(),
resource_type=cmd_runner_fmt.as_func(fmt_resource_type),
@@ -49,7 +63,10 @@ def pacemaker_runner(module, cli_action, **kwargs):
resource_operation=cmd_runner_fmt.as_func(fmt_resource_operation),
resource_meta=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("meta"),
resource_argument=cmd_runner_fmt.as_func(fmt_resource_argument),
+ apply_all=cmd_runner_fmt.as_bool("--all"),
wait=cmd_runner_fmt.as_opt_eq_val("--wait"),
+ config=cmd_runner_fmt.as_fixed("config"),
+ force=cmd_runner_fmt.as_bool("--force"),
),
**kwargs
)
diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py
index de43f80b40..bb37712c21 100644
--- a/plugins/module_utils/pipx.py
+++ b/plugins/module_utils/pipx.py
@@ -71,36 +71,51 @@ def pipx_runner(module, command, **kwargs):
return runner
-def make_process_list(mod_helper, **kwargs):
- def process_list(rc, out, err):
- if not out:
- return []
+def _make_entry(venv_name, venv, include_injected, include_deps):
+ entry = {
+ 'name': venv_name,
+ 'version': venv['metadata']['main_package']['package_version'],
+ 'pinned': venv['metadata']['main_package'].get('pinned'),
+ }
+ if include_injected:
+ entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()}
+ if include_deps:
+ entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies'])
+ return entry
- results = []
+
+def make_process_dict(include_injected, include_deps=False):
+ def process_dict(rc, out, err):
+ if not out:
+ return {}
+
+ results = {}
raw_data = json.loads(out)
+ for venv_name, venv in raw_data['venvs'].items():
+ results[venv_name] = _make_entry(venv_name, venv, include_injected, include_deps)
+
+ return results, raw_data
+
+ return process_dict
+
+
+def make_process_list(mod_helper, **kwargs):
+ #
+ # ATTENTION!
+ #
+ # The function `make_process_list()` is deprecated and will be removed in community.general 13.0.0
+ #
+ process_dict = make_process_dict(mod_helper, **kwargs)
+
+ def process_list(rc, out, err):
+ res_dict, raw_data = process_dict(rc, out, err)
+
if kwargs.get("include_raw"):
mod_helper.vars.raw_output = raw_data
- if kwargs["name"]:
- if kwargs["name"] in raw_data['venvs']:
- data = {kwargs["name"]: raw_data['venvs'][kwargs["name"]]}
- else:
- data = {}
- else:
- data = raw_data['venvs']
-
- for venv_name, venv in data.items():
- entry = {
- 'name': venv_name,
- 'version': venv['metadata']['main_package']['package_version'],
- 'pinned': venv['metadata']['main_package'].get('pinned'),
- }
- if kwargs.get("include_injected"):
- entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()}
- if kwargs.get("include_deps"):
- entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies'])
- results.append(entry)
-
- return results
-
+ return [
+ entry
+ for name, entry in res_dict.items()
+ if name == kwargs.get("name")
+ ]
return process_list
diff --git a/plugins/module_utils/pkg_req.py b/plugins/module_utils/pkg_req.py
new file mode 100644
index 0000000000..8e82ffd360
--- /dev/null
+++ b/plugins/module_utils/pkg_req.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.six import raise_from
+
+from ansible_collections.community.general.plugins.module_utils import deps
+
+
+with deps.declare("packaging"):
+ from packaging.requirements import Requirement
+ from packaging.version import parse as parse_version, InvalidVersion
+
+
+class PackageRequirement:
+ def __init__(self, module, name):
+ self.module = module
+ self.parsed_name, self.requirement = self._parse_spec(name)
+
+ def _parse_spec(self, name):
+ """
+ Parse a package name that may include version specifiers using PEP 508.
+ Returns a tuple of (name, requirement) where requirement is of type packaging.requirements.Requirement and it may be None.
+
+ Example inputs:
+ "package"
+ "package>=1.0"
+ "package>=1.0,<2.0"
+ "package[extra]>=1.0"
+ "package[foo,bar]>=1.0,!=1.5"
+
+ :param name: Package name with optional version specifiers and extras
+ :return: Tuple of (name, requirement)
+ :raises ValueError: If the package specification is invalid
+ """
+ if not name:
+ return name, None
+
+ # Quick check for simple package names
+ if not any(c in name for c in '>=
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import traceback
-from time import sleep
-
-PROXMOXER_IMP_ERR = None
-try:
- from proxmoxer import ProxmoxAPI
- from proxmoxer import __version__ as proxmoxer_version
- HAS_PROXMOXER = True
-except ImportError:
- HAS_PROXMOXER = False
- PROXMOXER_IMP_ERR = traceback.format_exc()
-
-
-from ansible.module_utils.basic import env_fallback, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-
-def proxmox_auth_argument_spec():
- return dict(
- api_host=dict(type='str',
- required=True,
- fallback=(env_fallback, ['PROXMOX_HOST'])
- ),
- api_port=dict(type='int',
- fallback=(env_fallback, ['PROXMOX_PORT'])
- ),
- api_user=dict(type='str',
- required=True,
- fallback=(env_fallback, ['PROXMOX_USER'])
- ),
- api_password=dict(type='str',
- no_log=True,
- fallback=(env_fallback, ['PROXMOX_PASSWORD'])
- ),
- api_token_id=dict(type='str',
- no_log=False
- ),
- api_token_secret=dict(type='str',
- no_log=True
- ),
- validate_certs=dict(type='bool',
- default=False
- ),
- )
-
-
-def proxmox_to_ansible_bool(value):
- '''Convert Proxmox representation of a boolean to be ansible-friendly'''
- return True if value == 1 else False
-
-
-def ansible_to_proxmox_bool(value):
- '''Convert Ansible representation of a boolean to be proxmox-friendly'''
- if value is None:
- return None
-
- if not isinstance(value, bool):
- raise ValueError("%s must be of type bool not %s" % (value, type(value)))
-
- return 1 if value else 0
-
-
-class ProxmoxAnsible(object):
- """Base class for Proxmox modules"""
- TASK_TIMED_OUT = 'timeout expired'
-
- def __init__(self, module):
- if not HAS_PROXMOXER:
- module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
-
- self.module = module
- self.proxmoxer_version = proxmoxer_version
- self.proxmox_api = self._connect()
- # Test token validity
- try:
- self.proxmox_api.version.get()
- except Exception as e:
- module.fail_json(msg='%s' % e, exception=traceback.format_exc())
-
- def _connect(self):
- api_host = self.module.params['api_host']
- api_port = self.module.params['api_port']
- api_user = self.module.params['api_user']
- api_password = self.module.params['api_password']
- api_token_id = self.module.params['api_token_id']
- api_token_secret = self.module.params['api_token_secret']
- validate_certs = self.module.params['validate_certs']
-
- auth_args = {'user': api_user}
-
- if api_port:
- auth_args['port'] = api_port
-
- if api_password:
- auth_args['password'] = api_password
- else:
- if self.proxmoxer_version < LooseVersion('1.1.0'):
- self.module.fail_json('Using "token_name" and "token_value" require proxmoxer>=1.1.0')
- auth_args['token_name'] = api_token_id
- auth_args['token_value'] = api_token_secret
-
- try:
- return ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
- except Exception as e:
- self.module.fail_json(msg='%s' % e, exception=traceback.format_exc())
-
- def version(self):
- try:
- apiversion = self.proxmox_api.version.get()
- return LooseVersion(apiversion['version'])
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve Proxmox VE version: %s' % e)
-
- def get_node(self, node):
- try:
- nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node]
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve Proxmox VE node: %s' % e)
- return nodes[0] if nodes else None
-
- def get_nextvmid(self):
- try:
- return self.proxmox_api.cluster.nextid.get()
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve next free vmid: %s' % e)
-
- def get_vmid(self, name, ignore_missing=False, choose_first_if_multiple=False):
- try:
- vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name]
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve list of VMs filtered by name %s: %s' % (name, e))
-
- if not vms:
- if ignore_missing:
- return None
-
- self.module.fail_json(msg='No VM with name %s found' % name)
- elif len(vms) > 1 and not choose_first_if_multiple:
- self.module.fail_json(msg='Multiple VMs with name %s found, provide vmid instead' % name)
-
- return vms[0]
-
- def get_vm(self, vmid, ignore_missing=False):
- try:
- vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve list of VMs filtered by vmid %s: %s' % (vmid, e))
-
- if vms:
- return vms[0]
- else:
- if ignore_missing:
- return None
-
- self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid)
-
- def api_task_ok(self, node, taskid):
- try:
- status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
- return status['status'] == 'stopped' and status['exitstatus'] == 'OK'
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve API task ID from node %s: %s' % (node, e))
-
- def api_task_failed(self, node, taskid):
- """ Explicitly check if the task stops but exits with a failed status
- """
- try:
- status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
- return status['status'] == 'stopped' and status['exitstatus'] != 'OK'
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve API task ID from node %s: %s' % (node, e))
-
- def api_task_complete(self, node_name, task_id, timeout):
- """Wait until the task stops or times out.
-
- :param node_name: Proxmox node name where the task is running.
- :param task_id: ID of the running task.
- :param timeout: Timeout in seconds to wait for the task to complete.
- :return: Task completion status (True/False) and ``exitstatus`` message when status=False.
- """
- status = {}
- while timeout:
- try:
- status = self.proxmox_api.nodes(node_name).tasks(task_id).status.get()
- except Exception as e:
- self.module.fail_json(msg='Unable to retrieve API task ID from node %s: %s' % (node_name, e))
-
- if status['status'] == 'stopped':
- if status['exitstatus'] == 'OK':
- return True, None
- else:
- return False, status['exitstatus']
- else:
- timeout -= 1
- if timeout <= 0:
- return False, ProxmoxAnsible.TASK_TIMED_OUT
- sleep(1)
-
- def get_pool(self, poolid):
- """Retrieve pool information
-
- :param poolid: str - name of the pool
- :return: dict - pool information
- """
- try:
- return self.proxmox_api.pools(poolid).get()
- except Exception as e:
- self.module.fail_json(msg="Unable to retrieve pool %s information: %s" % (poolid, e))
-
- def get_storages(self, type):
- """Retrieve storages information
-
- :param type: str, optional - type of storages
- :return: list of dicts - array of storages
- """
- try:
- return self.proxmox_api.storage.get(type=type)
- except Exception as e:
- self.module.fail_json(msg="Unable to retrieve storages information with type %s: %s" % (type, e))
-
- def get_storage_content(self, node, storage, content=None, vmid=None):
- try:
- return (
- self.proxmox_api.nodes(node)
- .storage(storage)
- .content()
- .get(content=content, vmid=vmid)
- )
- except Exception as e:
- self.module.fail_json(
- msg="Unable to list content on %s, %s for %s and %s: %s"
- % (node, storage, content, vmid, e)
- )
diff --git a/plugins/module_utils/python_runner.py b/plugins/module_utils/python_runner.py
index b65867c61e..a8e9e651be 100644
--- a/plugins/module_utils/python_runner.py
+++ b/plugins/module_utils/python_runner.py
@@ -19,7 +19,7 @@ class PythonRunner(CmdRunner):
self.venv = venv
self.has_venv = venv is not None
- if (os.path.isabs(python) or '/' in python):
+ if os.path.isabs(python) or '/' in python:
self.python = python
elif self.has_venv:
if path_prefix is None:
diff --git a/plugins/module_utils/redfish_utils.py b/plugins/module_utils/redfish_utils.py
index f7332aa99b..bc93f0e498 100644
--- a/plugins/module_utils/redfish_utils.py
+++ b/plugins/module_utils/redfish_utils.py
@@ -10,9 +10,7 @@ import json
import os
import random
import string
-import gzip
import time
-from io import BytesIO
from ansible.module_utils.urls import open_url
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.text.converters import to_text
@@ -21,8 +19,6 @@ from ansible.module_utils.six import text_type
from ansible.module_utils.six.moves import http_client
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ansible.module_utils.ansible_release import __version__ as ansible_version
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
@@ -183,12 +179,7 @@ class RedfishUtils(object):
timeout=timeout,
)
try:
- if headers.get('content-encoding') == 'gzip' and LooseVersion(ansible_version) < LooseVersion('2.14'):
- # Older versions of Ansible do not automatically decompress the data
- # Starting in 2.14, open_url will decompress the response data by default
- data = json.loads(to_native(gzip.open(BytesIO(resp.read()), 'rt', encoding='utf-8').read()))
- else:
- data = json.loads(to_native(resp.read()))
+ data = json.loads(to_native(resp.read()))
except Exception as e:
# No response data; this is okay in certain cases
data = None
@@ -451,9 +442,6 @@ class RedfishUtils(object):
pass
return msg, data
- def _init_session(self):
- self.module.deprecate("Method _init_session is deprecated and will be removed.", version="11.0.0", collection_name="community.general")
-
def _get_vendor(self):
# If we got the vendor info once, don't get it again
if self._vendor is not None:
diff --git a/plugins/module_utils/xdg_mime.py b/plugins/module_utils/xdg_mime.py
new file mode 100644
index 0000000000..f84b9ef7ea
--- /dev/null
+++ b/plugins/module_utils/xdg_mime.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Marcos Alano
+# Based on gio_mime module. Copyright (c) 2022, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+def xdg_mime_runner(module, **kwargs):
+ return CmdRunner(
+ module,
+ command=['xdg-mime'],
+ arg_formats=dict(
+ default=cmd_runner_fmt.as_fixed('default'),
+ query=cmd_runner_fmt.as_fixed('query'),
+ mime_types=cmd_runner_fmt.as_list(),
+ handler=cmd_runner_fmt.as_list(),
+ version=cmd_runner_fmt.as_fixed('--version'),
+ ),
+ **kwargs
+ )
+
+
+def xdg_mime_get(runner, mime_type):
+ def process(rc, out, err):
+ if not out.strip():
+ return None
+ out = out.splitlines()[0]
+ return out.split()[-1]
+
+ with runner("query default mime_types", output_process=process) as ctx:
+ return ctx.run(mime_types=mime_type)
diff --git a/plugins/modules/aerospike_migrations.py b/plugins/modules/aerospike_migrations.py
index 9a6084a6a1..d9440fdb4e 100644
--- a/plugins/modules/aerospike_migrations.py
+++ b/plugins/modules/aerospike_migrations.py
@@ -29,7 +29,6 @@ options:
host:
description:
- Which host do we use as seed for info connection.
- required: false
type: str
default: localhost
port:
@@ -70,7 +69,7 @@ options:
type: bool
min_cluster_size:
description:
- - Check will return bad until cluster size is met or until tries is exhausted.
+ - Check fails until cluster size is met or until tries is exhausted.
required: false
type: int
default: 1
@@ -94,10 +93,10 @@ options:
default: migrate_rx_partitions_remaining
target_cluster_size:
description:
- - When all aerospike builds in the cluster are greater than version 4.3, then the C(cluster-stable) info command will
- be used. Inside this command, you can optionally specify what the target cluster size is - but it is not necessary.
+ - When all aerospike builds in the cluster are greater than version 4.3, then the C(cluster-stable) info command is
+ used. Inside this command, you can optionally specify what the target cluster size is - but it is not necessary.
You can still rely on O(min_cluster_size) if you do not want to use this option.
- - If this option is specified on a cluster that has at least one host <4.3 then it will be ignored until the min version
+ - If this option is specified on a cluster that has at least one host <4.3 then it is ignored until the min version
reaches 4.3.
required: false
type: int
@@ -180,19 +179,19 @@ else:
def run_module():
"""run ansible module"""
module_args = dict(
- host=dict(type='str', required=False, default='localhost'),
- port=dict(type='int', required=False, default=3000),
- connect_timeout=dict(type='int', required=False, default=1000),
- consecutive_good_checks=dict(type='int', required=False, default=3),
- sleep_between_checks=dict(type='int', required=False, default=60),
- tries_limit=dict(type='int', required=False, default=300),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=3000),
+ connect_timeout=dict(type='int', default=1000),
+ consecutive_good_checks=dict(type='int', default=3),
+ sleep_between_checks=dict(type='int', default=60),
+ tries_limit=dict(type='int', default=300),
local_only=dict(type='bool', required=True),
- min_cluster_size=dict(type='int', required=False, default=1),
- target_cluster_size=dict(type='int', required=False, default=None),
- fail_on_cluster_change=dict(type='bool', required=False, default=True),
- migrate_tx_key=dict(type='str', required=False, no_log=False,
+ min_cluster_size=dict(type='int', default=1),
+ target_cluster_size=dict(type='int'),
+ fail_on_cluster_change=dict(type='bool', default=True),
+ migrate_tx_key=dict(type='str', no_log=False,
default="migrate_tx_partitions_remaining"),
- migrate_rx_key=dict(type='str', required=False, no_log=False,
+ migrate_rx_key=dict(type='str', no_log=False,
default="migrate_rx_partitions_remaining")
)
diff --git a/plugins/modules/airbrake_deployment.py b/plugins/modules/airbrake_deployment.py
index d772062da4..0fe04f21d6 100644
--- a/plugins/modules/airbrake_deployment.py
+++ b/plugins/modules/airbrake_deployment.py
@@ -71,7 +71,7 @@ options:
type: str
validate_certs:
description:
- - If V(false), SSL certificates for the target URL will not be validated. This should only be used on personally controlled
+ - If V(false), SSL certificates for the target URL is not validated. This should only be used on personally controlled
sites using self-signed certificates.
required: false
default: true
@@ -114,11 +114,11 @@ def main():
project_id=dict(required=True, no_log=True, type='str'),
project_key=dict(required=True, no_log=True, type='str'),
environment=dict(required=True, type='str'),
- user=dict(required=False, type='str'),
- repo=dict(required=False, type='str'),
- revision=dict(required=False, type='str'),
- version=dict(required=False, type='str'),
- url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
+ user=dict(type='str'),
+ repo=dict(type='str'),
+ revision=dict(type='str'),
+ version=dict(type='str'),
+ url=dict(default='https://api.airbrake.io/api/v4/projects/', type='str'),
validate_certs=dict(default=True, type='bool'),
),
supports_check_mode=True,
diff --git a/plugins/modules/aix_filesystem.py b/plugins/modules/aix_filesystem.py
index 8934d583ff..92a734e8ac 100644
--- a/plugins/modules/aix_filesystem.py
+++ b/plugins/modules/aix_filesystem.py
@@ -47,7 +47,7 @@ options:
description:
- Logical volume (LV) device name or remote export device to create a NFS file system.
- It is used to create a file system on an already existing logical volume or the exported NFS file system.
- - If not mentioned a new logical volume name will be created following AIX standards (LVM).
+ - If not mentioned a new logical volume name is created following AIX standards (LVM).
type: str
fs_type:
description:
@@ -81,14 +81,14 @@ options:
size:
description:
- Specifies the file system size.
- - For already V(present) it will be resized.
- - 512-byte blocks, Megabytes or Gigabytes. If the value has M specified it will be in Megabytes. If the value has G
- specified it will be in Gigabytes.
- - If no M or G the value will be 512-byte blocks.
- - If "+" is specified in begin of value, the value will be added.
- - If "-" is specified in begin of value, the value will be removed.
- - If "+" or "-" is not specified, the total value will be the specified.
- - Size will respects the LVM AIX standards.
+ - For already present it resizes the filesystem.
+ - 512-byte blocks, megabytes or gigabytes. If the value has M specified it is in megabytes. If the value has G specified
+ it is in gigabytes.
+ - If no M or G the value is 512-byte blocks.
+ - If V(+) is specified in begin of value, the value is added.
+ - If V(-) is specified in begin of value, the value is removed.
+ - If neither V(+) nor V(-) is specified, then the total value is the specified.
+ - Size respects the LVM AIX standards.
type: str
state:
description:
@@ -165,16 +165,6 @@ EXAMPLES = r"""
state: absent
"""
-RETURN = r"""
-changed:
- description: Return changed for aix_filesystems actions as true or false.
- returned: always
- type: bool
-msg:
- description: Return message regarding the action.
- returned: always
- type: str
-"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils._mount import ismount
diff --git a/plugins/modules/aix_inittab.py b/plugins/modules/aix_inittab.py
index 0c32f91e7f..ece4e95547 100644
--- a/plugins/modules/aix_inittab.py
+++ b/plugins/modules/aix_inittab.py
@@ -112,16 +112,6 @@ name:
returned: always
type: str
sample: startmyservice
-msg:
- description: Action done with the C(inittab) entry.
- returned: changed
- type: str
- sample: changed inittab entry startmyservice
-changed:
- description: Whether the C(inittab) changed or not.
- returned: always
- type: bool
- sample: true
"""
# Import necessary libraries
diff --git a/plugins/modules/aix_lvg.py b/plugins/modules/aix_lvg.py
index 29c0b7d3f9..c41e21124e 100644
--- a/plugins/modules/aix_lvg.py
+++ b/plugins/modules/aix_lvg.py
@@ -36,7 +36,7 @@ options:
description:
- List of comma-separated devices to use as physical devices in this volume group.
- Required when creating or extending (V(present) state) the volume group.
- - If not informed reducing (V(absent) state) the volume group will be removed.
+ - If not informed reducing (V(absent) state) the volume group is removed.
type: list
elements: str
state:
@@ -57,7 +57,7 @@ options:
choices: [big, normal, scalable]
default: normal
notes:
- - AIX will permit remove VG only if all LV/Filesystems are not busy.
+ - AIX allows removing VG only if all LV/Filesystems are not busy.
- Module does not modify PP size for already present volume group.
"""
diff --git a/plugins/modules/ali_instance.py b/plugins/modules/ali_instance.py
index 1a66850e14..050794d55c 100644
--- a/plugins/modules/ali_instance.py
+++ b/plugins/modules/ali_instance.py
@@ -45,8 +45,7 @@ options:
type: str
availability_zone:
description:
- - Aliyun availability zone ID in which to launch the instance. If it is not specified, it will be allocated by system
- automatically.
+ - Aliyun availability zone ID in which to launch the instance. If it is not specified, it is allocated by system automatically.
aliases: ['alicloud_zone', 'zone_id']
type: str
image_id:
@@ -109,7 +108,7 @@ options:
version_added: '0.2.0'
password:
description:
- - The password to login instance. After rebooting instances, modified password will take effect.
+ - The password to login instance. After rebooting instances, modified password is effective.
type: str
system_disk_category:
description:
@@ -140,7 +139,7 @@ options:
description:
- O(count) determines how many instances based on a specific tag criteria should be present. This can be expressed in
multiple ways and is shown in the EXAMPLES section. The specified count_tag must already exist or be passed in as
- the O(tags) option. If it is not specified, it will be replaced by O(instance_name).
+ the O(tags) option. If it is not specified, it is replaced by O(instance_name).
type: str
allocate_public_ip:
description:
@@ -172,8 +171,7 @@ options:
type: int
instance_ids:
description:
- - A list of instance IDs. It is required when need to operate existing instances. If it is specified, O(count) will
- lose efficacy.
+ - A list of instance IDs. It is required when need to operate existing instances. If it is specified, O(count) is ignored.
type: list
elements: str
force:
@@ -203,7 +201,7 @@ options:
user_data:
description:
- User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance. It
- only will take effect when launching the new ECS instances.
+ only takes effect when launching the new ECS instances.
required: false
type: str
ram_role_name:
@@ -226,7 +224,7 @@ options:
version_added: '0.2.0'
period_unit:
description:
- - The duration unit that you will buy the resource. It is valid when O(instance_charge_type=PrePaid).
+ - The duration unit that you are buying the resource. It is valid when O(instance_charge_type=PrePaid).
choices: ['Month', 'Week']
default: 'Month'
type: str
@@ -424,7 +422,7 @@ instances:
type: str
sample: 42.10.2.2
expired_time:
- description: The time the instance will expire.
+ description: The time the instance expires.
returned: always
type: str
sample: "2099-12-31T15:59Z"
@@ -615,7 +613,7 @@ ids:
description: List of ECS instance IDs.
returned: always
type: list
- sample: [i-12345er, i-3245fs]
+ sample: ["i-12345er", "i-3245fs"]
"""
import re
diff --git a/plugins/modules/ali_instance_info.py b/plugins/modules/ali_instance_info.py
index 00e77b1ab2..7be5b8cda6 100644
--- a/plugins/modules/ali_instance_info.py
+++ b/plugins/modules/ali_instance_info.py
@@ -167,7 +167,7 @@ instances:
type: str
sample: 42.10.2.2
expired_time:
- description: The time the instance will expire.
+ description: The time the instance expires.
returned: always
type: str
sample: "2099-12-31T15:59Z"
@@ -341,7 +341,7 @@ ids:
description: List of ECS instance IDs.
returned: always
type: list
- sample: [i-12345er, i-3245fs]
+ sample: ["i-12345er", "i-3245fs"]
"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
diff --git a/plugins/modules/android_sdk.py b/plugins/modules/android_sdk.py
index a604a510ed..a9bffa50ea 100644
--- a/plugins/modules/android_sdk.py
+++ b/plugins/modules/android_sdk.py
@@ -28,8 +28,8 @@ version_added: 10.2.0
options:
accept_licenses:
description:
- - If this is set to V(true), the module will try to accept license prompts generated by C(sdkmanager) during package
- installation. Otherwise, every license prompt will be rejected.
+ - If this is set to V(true), the module attempts to accept license prompts generated by C(sdkmanager) during package
+ installation. Otherwise, every license prompt is rejected.
type: bool
default: false
name:
@@ -64,16 +64,16 @@ requirements:
notes:
- For some of the packages installed by C(sdkmanager) is it necessary to accept licenses. Usually it is done through command
line prompt in a form of a Y/N question when a licensed package is requested to be installed. If there are several packages
- requested for installation and at least two of them belong to different licenses, the C(sdkmanager) tool will prompt for
- these licenses in a loop. In order to install packages, the module must be able to answer these license prompts. Currently,
+ requested for installation and at least two of them belong to different licenses, the C(sdkmanager) tool prompts for these
+ licenses in a loop. In order to install packages, the module must be able to answer these license prompts. Currently,
it is only possible to answer one license prompt at a time, meaning that instead of installing multiple packages as a
- single invocation of the C(sdkmanager --install) command, it will be done by executing the command independently for each
- package. This makes sure that at most only one license prompt will need to be answered. At the time of writing this module,
- a C(sdkmanager)'s package may belong to at most one license type that needs to be accepted. However, if this changes in
- the future, the module may hang as there might be more prompts generated by the C(sdkmanager) tool which the module will
- not be able to answer. If this becomes the case, file an issue and in the meantime, consider accepting all the licenses
- in advance, as it is described in the C(sdkmanager) L(documentation,https://developer.android.com/tools/sdkmanager#accept-licenses),
- for instance, using the M(ansible.builtin.command) module.
+ single invocation of the C(sdkmanager --install) command, it is done by executing the command independently for each package.
+ This makes sure that at most only one license prompt needs to be answered. At the time of writing this module, a C(sdkmanager)'s
+ package may belong to at most one license type that needs to be accepted. However, if this changes in the future, the
+ module may hang as there might be more prompts generated by the C(sdkmanager) tool which the module is unable to answer.
+ If this becomes the case, file an issue and in the meantime, consider accepting all the licenses in advance, as it is
+ described in the C(sdkmanager) L(documentation,https://developer.android.com/tools/sdkmanager#accept-licenses), for instance,
+ using the M(ansible.builtin.command) module.
seealso:
- name: sdkmanager tool documentation
description: Detailed information of how to install and use sdkmanager command line tool.
@@ -126,13 +126,13 @@ installed:
description: A list of packages that have been installed.
returned: when packages have changed
type: list
- sample: ['build-tools;34.0.0', 'platform-tools']
+ sample: ["build-tools;34.0.0", "platform-tools"]
removed:
description: A list of packages that have been removed.
returned: when packages have changed
type: list
- sample: ['build-tools;34.0.0', 'platform-tools']
+ sample: ["build-tools;34.0.0", "platform-tools"]
"""
from ansible_collections.community.general.plugins.module_utils.mh.module_helper import StateModuleHelper
@@ -150,7 +150,6 @@ class AndroidSdk(StateModuleHelper):
),
supports_check_mode=True
)
- use_old_vardict = False
def __init_module__(self):
self.sdkmanager = AndroidSdkManager(self.module)
diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py
index ad055dfa14..4712ca9a3c 100644
--- a/plugins/modules/ansible_galaxy_install.py
+++ b/plugins/modules/ansible_galaxy_install.py
@@ -18,8 +18,8 @@ description:
- This module allows the installation of Ansible collections or roles using C(ansible-galaxy).
notes:
- Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0.
- - The module will try and run using the C(C.UTF-8) locale. If that fails, it will try C(en_US.UTF-8). If that one also fails,
- the module will fail.
+ - The module tries to run using the C(C.UTF-8) locale. If that fails, it tries C(en_US.UTF-8). If that one also fails, the
+ module fails.
seealso:
- name: C(ansible-galaxy) command manual page
description: Manual page for the command.
@@ -37,10 +37,10 @@ attributes:
options:
state:
description:
- - If O(state=present) then the collection or role will be installed. Note that the collections and roles are not updated
+ - If O(state=present) then the collection or role is installed. Note that the collections and roles are not updated
with this option.
- - Currently the O(state=latest) is ignored unless O(type=collection), and it will ensure the collection is installed
- and updated to the latest available version.
+ - Currently the O(state=latest) is ignored unless O(type=collection), and it ensures the collection is installed and
+ updated to the latest available version.
- Please note that O(force=true) can be used to perform upgrade regardless of O(type).
type: str
choices: [present, latest]
@@ -71,7 +71,7 @@ options:
dest:
description:
- The path to the directory containing your collections or roles, according to the value of O(type).
- - Please notice that C(ansible-galaxy) will not install collections with O(type=both), when O(requirements_file) contains
+ - Please notice that C(ansible-galaxy) does not install collections with O(type=both), when O(requirements_file) contains
both roles and collections and O(dest) is specified.
type: path
no_deps:
@@ -83,7 +83,7 @@ options:
force:
description:
- Force overwriting existing roles and/or collections.
- - It can be used for upgrading, but the module output will always report C(changed=true).
+ - It can be used for upgrading, but the module output always reports C(changed=true).
- Using O(force=true) is mandatory when downgrading.
type: bool
default: false
@@ -220,7 +220,6 @@ class AnsibleGalaxyInstall(ModuleHelper):
required_if=[('type', 'both', ['requirements_file'])],
supports_check_mode=False,
)
- use_old_vardict = False
command = 'ansible-galaxy'
command_args_formats = dict(
diff --git a/plugins/modules/apache2_mod_proxy.py b/plugins/modules/apache2_mod_proxy.py
index 73712d6efc..3816845257 100644
--- a/plugins/modules/apache2_mod_proxy.py
+++ b/plugins/modules/apache2_mod_proxy.py
@@ -41,8 +41,8 @@ options:
description:
- (IPv4|IPv6|FQDN) of the balancer member to get or to set attributes to. Port number is autodetected and should not
be specified here.
- - If undefined, the M(community.general.apache2_mod_proxy) module will return a members list of dictionaries of all the current
- balancer pool members' attributes.
+ - If undefined, the M(community.general.apache2_mod_proxy) module returns a members list of dictionaries of all the
+ current balancer pool members' attributes.
state:
type: list
elements: str
@@ -117,18 +117,19 @@ member:
type: dict
returned: success
sample:
- {"attributes":
- {"Busy": "0",
- "Elected": "42",
- "Factor": "1",
- "From": "136K",
- "Load": "0",
- "Route": null,
- "RouteRedir": null,
- "Set": "0",
- "Status": "Init Ok ",
- "To": " 47K",
- "Worker URL": null
+ {
+ "attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
@@ -137,10 +138,10 @@ member:
"port": 8080,
"protocol": "http",
"status": {
- "disabled": false,
- "drained": false,
- "hot_standby": false,
- "ignore_errors": false
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
}
}
members:
@@ -149,7 +150,9 @@ members:
returned: success
type: list
sample:
- [{"attributes": {
+ [
+ {
+ "attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
@@ -161,21 +164,22 @@ members:
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
- },
- "balancer_url": "http://10.10.0.2/balancer-manager/",
- "host": "10.10.0.20",
- "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
- "path": "/ws",
- "port": 8080,
- "protocol": "http",
- "status": {
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false
- }
+ }
},
- {"attributes": {
+ {
+ "attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
@@ -187,18 +191,19 @@ members:
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
- },
- "balancer_url": "http://10.10.0.2/balancer-manager/",
- "host": "10.10.0.21",
- "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
- "path": "/ws",
- "port": 8080,
- "protocol": "http",
- "status": {
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.21",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
"disabled": false,
"drained": false,
"hot_standby": false,
- "ignore_errors": false}
+ "ignore_errors": false
+ }
}
]
"""
@@ -382,7 +387,6 @@ class ApacheModProxy(ModuleHelper):
),
supports_check_mode=True
)
- use_old_vardict = False
def __init_module__(self):
deps.validate(self.module)
diff --git a/plugins/modules/apache2_module.py b/plugins/modules/apache2_module.py
index 2007b5d1f1..99db968670 100644
--- a/plugins/modules/apache2_module.py
+++ b/plugins/modules/apache2_module.py
@@ -113,22 +113,6 @@ result:
description: Message about action taken.
returned: always
type: str
-warnings:
- description: List of warning messages.
- returned: when needed
- type: list
-rc:
- description: Return code of underlying command.
- returned: failed
- type: int
-stdout:
- description: The stdout of underlying command.
- returned: failed
- type: str
-stderr:
- description: The stderr of underlying command.
- returned: failed
- type: str
"""
import re
@@ -164,12 +148,12 @@ def _module_is_enabled(module):
if module.params['ignore_configcheck']:
if 'AH00534' in stderr and 'mpm_' in module.params['name']:
if module.params['warn_mpm_absent']:
- module.warnings.append(
+ module.warn(
"No MPM module loaded! apache2 reload AND other module actions"
" will fail if no MPM module is loaded immediately."
)
else:
- module.warnings.append(error_msg)
+ module.warn(error_msg)
return False
else:
module.fail_json(msg=error_msg)
@@ -224,9 +208,7 @@ def _set_state(module, state):
if _module_is_enabled(module) != want_enabled:
if module.check_mode:
- module.exit_json(changed=True,
- result=success_msg,
- warnings=module.warnings)
+ module.exit_json(changed=True, result=success_msg)
a2mod_binary_path = module.get_bin_path(a2mod_binary)
if a2mod_binary_path is None:
@@ -241,9 +223,7 @@ def _set_state(module, state):
result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])
if _module_is_enabled(module) == want_enabled:
- module.exit_json(changed=True,
- result=success_msg,
- warnings=module.warnings)
+ module.exit_json(changed=True, result=success_msg)
else:
msg = (
'Failed to set module {name} to {state}:\n'
@@ -261,9 +241,7 @@ def _set_state(module, state):
stdout=stdout,
stderr=stderr)
else:
- module.exit_json(changed=False,
- result=success_msg,
- warnings=module.warnings)
+ module.exit_json(changed=False, result=success_msg)
def main():
@@ -279,8 +257,6 @@ def main():
supports_check_mode=True,
)
- module.warnings = []
-
name = module.params['name']
if name == 'cgi' and _run_threaded(module):
module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module cgi possible.")
diff --git a/plugins/modules/apk.py b/plugins/modules/apk.py
index 7f1f83ce56..e70e51a1f0 100644
--- a/plugins/modules/apk.py
+++ b/plugins/modules/apk.py
@@ -47,8 +47,8 @@ options:
version_added: 1.0.0
repository:
description:
- - A package repository or multiple repositories. Unlike with the underlying apk command, this list will override the
- system repositories rather than supplement them.
+ - A package repository or multiple repositories. Unlike with the underlying apk command, this list overrides the system
+ repositories rather than supplementing them.
type: list
elements: str
state:
@@ -79,7 +79,7 @@ options:
version_added: 5.4.0
notes:
- O(name) and O(upgrade) are mutually exclusive.
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly
+ - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly
to the O(name) option.
"""
@@ -164,7 +164,7 @@ packages:
description: A list of packages that have been changed.
returned: when packages have changed
type: list
- sample: ['package', 'other-package']
+ sample: ["package", "other-package"]
"""
import re
@@ -351,6 +351,9 @@ def main():
p = module.params
+ if p['name'] and any(not name.strip() for name in p['name']):
+ module.fail_json(msg="Package name(s) cannot be empty or whitespace-only")
+
if p['no_cache']:
APK_PATH = "%s --no-cache" % (APK_PATH, )
diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py
index 5a5ba57faf..1dcca5815c 100644
--- a/plugins/modules/apt_rpm.py
+++ b/plugins/modules/apt_rpm.py
@@ -35,9 +35,9 @@ options:
state:
description:
- Indicates the desired package state.
- - Please note that V(present) and V(installed) are equivalent to V(latest) right now. This will change in the future.
- To simply ensure that a package is installed, without upgrading it, use the V(present_not_latest) state.
- The states V(latest) and V(present_not_latest) have been added in community.general 8.6.0.
+ - Please note that before community.general 11.0.0, V(present) and V(installed) were equivalent to V(latest). This changed
+ in community.general 11.0.0. Now they are equivalent to V(present_not_latest).
choices:
- absent
- present
@@ -307,17 +307,6 @@ def main():
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
p = module.params
- if p['state'] in ['installed', 'present']:
- module.deprecate(
- 'state=%s currently behaves unexpectedly by always upgrading to the latest version if'
- ' the package is already installed. This behavior is deprecated and will change in'
- ' community.general 11.0.0. You can use state=latest to explicitly request this behavior'
- ' or state=present_not_latest to explicitly request the behavior that state=%s will have'
- ' in community.general 11.0.0, namely that the package will not be upgraded if it is'
- ' already installed.' % (p['state'], p['state']),
- version='11.0.0',
- collection_name='community.general',
- )
modified = False
output = ""
@@ -341,7 +330,7 @@ def main():
packages = p['package']
if p['state'] in ['installed', 'present', 'present_not_latest', 'latest']:
- (m, out) = install_packages(module, packages, allow_upgrade=p['state'] != 'present_not_latest')
+ (m, out) = install_packages(module, packages, allow_upgrade=p['state'] == 'latest')
modified = modified or m
output += out
diff --git a/plugins/modules/archive.py b/plugins/modules/archive.py
index 4e4b6368ce..65b397c255 100644
--- a/plugins/modules/archive.py
+++ b/plugins/modules/archive.py
@@ -43,7 +43,7 @@ options:
- The file name of the destination archive. The parent directory must exists on the remote host.
- This is required when O(path) refers to multiple files by either specifying a glob, a directory or multiple paths
in a list.
- - If the destination archive already exists, it will be truncated and overwritten.
+ - If the destination archive already exists, it is truncated and overwritten.
type: path
exclude_path:
description:
diff --git a/plugins/modules/awall.py b/plugins/modules/awall.py
index b95f36ea8d..0bc4ca1d79 100644
--- a/plugins/modules/awall.py
+++ b/plugins/modules/awall.py
@@ -40,7 +40,7 @@ options:
description:
- Activate the new firewall rules.
- Can be run with other steps or on its own.
- - Idempotency is affected if O(activate=true), as the module will always report a changed state.
+ - Idempotency is affected if O(activate=true), as the module always reports a changed state.
type: bool
default: false
notes:
diff --git a/plugins/modules/beadm.py b/plugins/modules/beadm.py
index 3d9d8ca651..0c200661f1 100644
--- a/plugins/modules/beadm.py
+++ b/plugins/modules/beadm.py
@@ -32,7 +32,7 @@ options:
aliases: ["be"]
snapshot:
description:
- - If specified, the new boot environment will be cloned from the given snapshot or inactive boot environment.
+ - If specified, the new boot environment is cloned from the given snapshot or inactive boot environment.
type: str
description:
description:
diff --git a/plugins/modules/bearychat.py b/plugins/modules/bearychat.py
index 1dec1bce68..e738d83d36 100644
--- a/plugins/modules/bearychat.py
+++ b/plugins/modules/bearychat.py
@@ -33,7 +33,7 @@ options:
- Message to send.
markdown:
description:
- - If V(true), text will be parsed as markdown.
+ - If V(true), text is parsed as markdown.
default: true
type: bool
channel:
diff --git a/plugins/modules/bigpanda.py b/plugins/modules/bigpanda.py
index aef9c15c92..81e2085b7d 100644
--- a/plugins/modules/bigpanda.py
+++ b/plugins/modules/bigpanda.py
@@ -75,7 +75,7 @@ options:
default: "https://api.bigpanda.io"
validate_certs:
description:
- - If V(false), SSL certificates for the target URL will not be validated. This should only be used on personally controlled
+ - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled
sites using self-signed certificates.
required: false
default: true
@@ -150,14 +150,14 @@ def main():
version=dict(required=True),
token=dict(required=True, no_log=True),
state=dict(required=True, choices=['started', 'finished', 'failed']),
- hosts=dict(required=False, aliases=['host']),
- env=dict(required=False),
- owner=dict(required=False),
- description=dict(required=False),
- deployment_message=dict(required=False),
- source_system=dict(required=False, default='ansible'),
+ hosts=dict(aliases=['host']),
+ env=dict(),
+ owner=dict(),
+ description=dict(),
+ deployment_message=dict(),
+ source_system=dict(default='ansible'),
validate_certs=dict(default=True, type='bool'),
- url=dict(required=False, default='https://api.bigpanda.io'),
+ url=dict(default='https://api.bigpanda.io'),
),
supports_check_mode=True,
)
diff --git a/plugins/modules/bitbucket_pipeline_known_host.py b/plugins/modules/bitbucket_pipeline_known_host.py
index f5594dc8ac..eb8b22b4f0 100644
--- a/plugins/modules/bitbucket_pipeline_known_host.py
+++ b/plugins/modules/bitbucket_pipeline_known_host.py
@@ -13,8 +13,7 @@ module: bitbucket_pipeline_known_host
short_description: Manages Bitbucket pipeline known hosts
description:
- Manages Bitbucket pipeline known hosts under the "SSH Keys" menu.
- - The host fingerprint will be retrieved automatically, but in case of an error, one can use O(key) field to specify it
- manually.
+ - The host fingerprint is retrieved automatically, but in case of an error, one can use the O(key) field to specify it manually.
author:
- Evgeniy Krysanov (@catcombo)
extends_documentation_fragment:
diff --git a/plugins/modules/bootc_manage.py b/plugins/modules/bootc_manage.py
index 44444960df..da92c02b06 100644
--- a/plugins/modules/bootc_manage.py
+++ b/plugins/modules/bootc_manage.py
@@ -20,7 +20,7 @@ options:
state:
description:
- Control whether to apply the latest image or switch the image.
- - B(Note:) This will not reboot the system.
+ - B(Note:) This does not reboot the system.
- Please use M(ansible.builtin.reboot) to reboot the system.
required: true
type: str
@@ -57,7 +57,7 @@ from ansible.module_utils.common.locale import get_best_parsable_locale
def main():
argument_spec = dict(
state=dict(type='str', required=True, choices=['switch', 'latest']),
- image=dict(type='str', required=False),
+ image=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
diff --git a/plugins/modules/bower.py b/plugins/modules/bower.py
index 3e7ebdaecc..547152fa98 100644
--- a/plugins/modules/bower.py
+++ b/plugins/modules/bower.py
@@ -187,13 +187,13 @@ class Bower(object):
def main():
arg_spec = dict(
- name=dict(default=None),
+ name=dict(),
offline=dict(default=False, type='bool'),
production=dict(default=False, type='bool'),
path=dict(required=True, type='path'),
- relative_execpath=dict(default=None, required=False, type='path'),
+ relative_execpath=dict(type='path'),
state=dict(default='present', choices=['present', 'absent', 'latest', ]),
- version=dict(default=None),
+ version=dict(),
)
module = AnsibleModule(
argument_spec=arg_spec
diff --git a/plugins/modules/btrfs_info.py b/plugins/modules/btrfs_info.py
index 0e432dfaff..9467fb782d 100644
--- a/plugins/modules/btrfs_info.py
+++ b/plugins/modules/btrfs_info.py
@@ -64,7 +64,7 @@ filesystems:
mountpoints:
description: Paths where the subvolume is mounted on the targeted host.
type: list
- sample: ['/home']
+ sample: ["/home"]
parent:
description: The identifier of this subvolume's parent.
type: int
diff --git a/plugins/modules/btrfs_subvolume.py b/plugins/modules/btrfs_subvolume.py
index 7e085d6103..3c34ef4680 100644
--- a/plugins/modules/btrfs_subvolume.py
+++ b/plugins/modules/btrfs_subvolume.py
@@ -64,9 +64,9 @@ options:
no change is required. Warning, this option does not yet verify that the target subvolume was generated from a snapshot
of the requested source.
- V(clobber) - If a subvolume already exists at the requested location, delete it first. This option is not idempotent
- and will result in a new snapshot being generated on every execution.
+ and results in a new snapshot being generated on every execution.
- V(error) - If a subvolume already exists at the requested location, return an error. This option is not idempotent
- and will result in an error on replay of the module.
+ and results in an error on replay of the module.
type: str
choices: [skip, clobber, error]
default: skip
@@ -80,8 +80,8 @@ options:
notes:
- If any or all of the options O(filesystem_device), O(filesystem_label) or O(filesystem_uuid) parameters are provided,
there is expected to be a matching btrfs filesystem. If none are provided and only a single btrfs filesystem exists or
- only a single btrfs filesystem is mounted, that filesystem will be used; otherwise, the module will take no action and
- return an error.
+ only a single btrfs filesystem is mounted, that filesystem is used; otherwise, the module takes no action and returns an
+ error.
extends_documentation_fragment:
- community.general.attributes
@@ -180,7 +180,7 @@ filesystem:
mountpoints:
description: Paths where the subvolume is mounted on the targeted host.
type: list
- sample: ['/home']
+ sample: ["/home"]
parent:
description: The identifier of this subvolume's parent.
type: int
@@ -644,16 +644,16 @@ class BtrfsSubvolumeModule(object):
def run_module():
module_args = dict(
- automount=dict(type='bool', required=False, default=False),
- default=dict(type='bool', required=False, default=False),
- filesystem_device=dict(type='path', required=False),
- filesystem_label=dict(type='str', required=False),
- filesystem_uuid=dict(type='str', required=False),
+ automount=dict(type='bool', default=False),
+ default=dict(type='bool', default=False),
+ filesystem_device=dict(type='path'),
+ filesystem_label=dict(type='str'),
+ filesystem_uuid=dict(type='str'),
name=dict(type='str', required=True),
recursive=dict(type='bool', default=False),
- state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
- snapshot_source=dict(type='str', required=False),
- snapshot_conflict=dict(type='str', required=False, default='skip', choices=['skip', 'clobber', 'error'])
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ snapshot_source=dict(type='str'),
+ snapshot_conflict=dict(type='str', default='skip', choices=['skip', 'clobber', 'error'])
)
module = AnsibleModule(
diff --git a/plugins/modules/bundler.py b/plugins/modules/bundler.py
index bfd7fe7ec1..6bf2556110 100644
--- a/plugins/modules/bundler.py
+++ b/plugins/modules/bundler.py
@@ -36,13 +36,13 @@ options:
type: path
description:
- The directory to execute the bundler commands from. This directory needs to contain a valid Gemfile or .bundle/ directory.
- - If not specified, it will default to the temporary working directory.
+ - If not specified, it defaults to the temporary working directory.
exclude_groups:
type: list
elements: str
description:
- A list of Gemfile groups to exclude during operations. This only applies when O(state=present). Bundler considers
- this a 'remembered' property for the Gemfile and will automatically exclude groups in future operations even if O(exclude_groups)
+ this a 'remembered' property for the Gemfile and automatically excludes groups in future operations even if O(exclude_groups)
is not set.
clean:
description:
@@ -53,7 +53,7 @@ options:
type: path
description:
- Only applies if O(state=present). The path to the gemfile to use to install gems.
- - If not specified it will default to the Gemfile in current directory.
+ - If not specified it defaults to the Gemfile in the current directory.
local:
description:
- If set only installs gems from the cache on the target host.
@@ -61,8 +61,8 @@ options:
default: false
deployment_mode:
description:
- - Only applies if O(state=present). If set it will install gems in C(./vendor/bundle) instead of the default location.
- Requires a C(Gemfile.lock) file to have been created prior.
+ - Only applies if O(state=present). If set it installs gems in C(./vendor/bundle) instead of the default location. Requires
+ a C(Gemfile.lock) file to have been created prior.
type: bool
default: false
user_install:
@@ -75,12 +75,12 @@ options:
description:
- Only applies if O(state=present). Specifies the directory to install the gems into. If O(chdir) is set then this path
is relative to O(chdir).
- - If not specified the default RubyGems gem paths will be used.
+ - If not specified the default RubyGems gem paths are used.
binstub_directory:
type: path
description:
- Only applies if O(state=present). Specifies the directory to install any gem bins files to. When executed the bin
- files will run within the context of the Gemfile and fail if any required gem dependencies are not installed. If O(chdir)
+ files run within the context of the Gemfile and fail if any required gem dependencies are not installed. If O(chdir)
is set then this path is relative to O(chdir).
extra_args:
type: str
@@ -131,18 +131,18 @@ def get_bundler_executable(module):
def main():
module = AnsibleModule(
argument_spec=dict(
- executable=dict(default=None, required=False),
- state=dict(default='present', required=False, choices=['present', 'latest']),
- chdir=dict(default=None, required=False, type='path'),
- exclude_groups=dict(default=None, required=False, type='list', elements='str'),
- clean=dict(default=False, required=False, type='bool'),
- gemfile=dict(default=None, required=False, type='path'),
- local=dict(default=False, required=False, type='bool'),
- deployment_mode=dict(default=False, required=False, type='bool'),
- user_install=dict(default=True, required=False, type='bool'),
- gem_path=dict(default=None, required=False, type='path'),
- binstub_directory=dict(default=None, required=False, type='path'),
- extra_args=dict(default=None, required=False),
+ executable=dict(),
+ state=dict(default='present', choices=['present', 'latest']),
+ chdir=dict(type='path'),
+ exclude_groups=dict(type='list', elements='str'),
+ clean=dict(default=False, type='bool'),
+ gemfile=dict(type='path'),
+ local=dict(default=False, type='bool'),
+ deployment_mode=dict(default=False, type='bool'),
+ user_install=dict(default=True, type='bool'),
+ gem_path=dict(type='path'),
+ binstub_directory=dict(type='path'),
+ extra_args=dict(),
),
supports_check_mode=True
)
diff --git a/plugins/modules/bzr.py b/plugins/modules/bzr.py
index 7a4512a5dd..76ae917802 100644
--- a/plugins/modules/bzr.py
+++ b/plugins/modules/bzr.py
@@ -42,12 +42,12 @@ options:
type: str
force:
description:
- - If V(true), any modified files in the working tree will be discarded.
+ - If V(true), any modified files in the working tree are discarded.
type: bool
default: false
executable:
description:
- - Path to bzr executable to use. If not supplied, the normal mechanism for resolving binary paths will be used.
+ - Path to C(bzr) executable to use. If not supplied, the normal mechanism for resolving binary paths is used.
type: str
"""
diff --git a/plugins/modules/campfire.py b/plugins/modules/campfire.py
index 91e83fc7d1..128790c372 100644
--- a/plugins/modules/campfire.py
+++ b/plugins/modules/campfire.py
@@ -14,7 +14,7 @@ module: campfire
short_description: Send a message to Campfire
description:
- Send a message to Campfire.
- - Messages with newlines will result in a "Paste" message being sent.
+ - Messages with newlines result in a "Paste" message being sent.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -48,10 +48,51 @@ options:
description:
- Send a notification sound before the message.
required: false
- choices: ["56k", "bell", "bezos", "bueller", "clowntown", "cottoneyejoe", "crickets", "dadgummit", "dangerzone", "danielsan",
- "deeper", "drama", "greatjob", "greyjoy", "guarantee", "heygirl", "horn", "horror", "inconceivable", "live", "loggins",
- "makeitso", "noooo", "nyan", "ohmy", "ohyeah", "pushit", "rimshot", "rollout", "rumble", "sax", "secret", "sexyback",
- "story", "tada", "tmyk", "trololo", "trombone", "unix", "vuvuzela", "what", "whoomp", "yeah", "yodel"]
+ choices:
+ - 56k
+ - bell
+ - bezos
+ - bueller
+ - clowntown
+ - cottoneyejoe
+ - crickets
+ - dadgummit
+ - dangerzone
+ - danielsan
+ - deeper
+ - drama
+ - greatjob
+ - greyjoy
+ - guarantee
+ - heygirl
+ - horn
+ - horror
+ - inconceivable
+ - live
+ - loggins
+ - makeitso
+ - noooo
+ - nyan
+ - ohmy
+ - ohyeah
+ - pushit
+ - rimshot
+ - rollout
+ - rumble
+ - sax
+ - secret
+ - sexyback
+ - story
+ - tada
+ - tmyk
+ - trololo
+ - trombone
+ - unix
+ - vuvuzela
+ - what
+ - whoomp
+ - yeah
+ - yodel
# informational: requirements for nodes
requirements: []
@@ -96,8 +137,7 @@ def main():
token=dict(required=True, no_log=True),
room=dict(required=True),
msg=dict(required=True),
- notify=dict(required=False,
- choices=["56k", "bell", "bezos", "bueller",
+ notify=dict(choices=["56k", "bell", "bezos", "bueller",
"clowntown", "cottoneyejoe",
"crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama",
diff --git a/plugins/modules/capabilities.py b/plugins/modules/capabilities.py
index 088c15e4f6..08bd2e85ff 100644
--- a/plugins/modules/capabilities.py
+++ b/plugins/modules/capabilities.py
@@ -40,10 +40,10 @@ options:
choices: [absent, present]
default: present
notes:
- - The capabilities system will automatically transform operators and flags into the effective set, so for example, C(cap_foo=ep)
- will probably become C(cap_foo+ep).
- - This module does not attempt to determine the final operator and flags to compare, so you will want to ensure that your
- capabilities argument matches the final capabilities.
+ - The capabilities system automatically transforms operators and flags into the effective set, so for example, C(cap_foo=ep)
+ probably becomes C(cap_foo+ep).
+ - This module does not attempt to determine the final operator and flags to compare, so you want to ensure that your capabilities
+ argument matches the final capabilities.
author:
- Nate Coraor (@natefoo)
"""
@@ -123,6 +123,8 @@ class CapabilitiesModule(object):
if ' =' in stdout:
# process output of an older version of libcap
caps = stdout.split(' =')[1].strip().split()
+ elif stdout.strip().endswith(")"): # '/foo (Error Message)'
+ self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
else:
# otherwise, we have a newer version here
# see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py
index 94a1102725..c00983fade 100644
--- a/plugins/modules/cargo.py
+++ b/plugins/modules/cargo.py
@@ -28,7 +28,7 @@ options:
executable:
description:
- Path to the C(cargo) installed in the system.
- - If not specified, the module will look C(cargo) in E(PATH).
+ - If not specified, the module looks for C(cargo) in E(PATH).
type: path
version_added: 7.5.0
name:
@@ -39,11 +39,11 @@ options:
required: true
path:
description: The base path where to install the Rust packages. Cargo automatically appends V(/bin). In other words, V(/usr/local)
- will become V(/usr/local/bin).
+ becomes V(/usr/local/bin).
type: path
version:
- description: The version to install. If O(name) contains multiple values, the module will try to install all of them in
- this version.
+ description: The version to install. If O(name) contains multiple values, the module tries to install all of them in this
+ version.
type: str
required: false
locked:
@@ -68,6 +68,15 @@ options:
type: path
required: false
version_added: 9.1.0
+ features:
+ description:
+ - List of features to activate.
+ - This is only used when installing packages.
+ type: list
+ elements: str
+ required: false
+ default: []
+ version_added: 11.0.0
requirements:
- cargo installed
"""
@@ -106,6 +115,12 @@ EXAMPLES = r"""
community.general.cargo:
name: ludusavi
directory: /path/to/ludusavi/source
+
+- name: Install "serpl" Rust package with ast_grep feature
+ community.general.cargo:
+ name: serpl
+ features:
+ - ast_grep
"""
import json
@@ -125,6 +140,7 @@ class Cargo(object):
self.version = kwargs["version"]
self.locked = kwargs["locked"]
self.directory = kwargs["directory"]
+ self.features = kwargs["features"]
@property
def path(self):
@@ -176,6 +192,8 @@ class Cargo(object):
if self.directory:
cmd.append("--path")
cmd.append(self.directory)
+ if self.features:
+ cmd += ["--features", ",".join(self.features)]
return self._exec(cmd)
def is_outdated(self, name):
@@ -229,13 +247,14 @@ class Cargo(object):
def main():
arg_spec = dict(
- executable=dict(default=None, type="path"),
+ executable=dict(type="path"),
name=dict(required=True, type="list", elements="str"),
- path=dict(default=None, type="path"),
+ path=dict(type="path"),
state=dict(default="present", choices=["present", "absent", "latest"]),
- version=dict(default=None, type="str"),
+ version=dict(type="str"),
locked=dict(default=False, type="bool"),
- directory=dict(default=None, type="path"),
+ directory=dict(type="path"),
+ features=dict(default=[], type="list", elements="str"),
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
diff --git a/plugins/modules/catapult.py b/plugins/modules/catapult.py
index 5329c90f54..448de5d13d 100644
--- a/plugins/modules/catapult.py
+++ b/plugins/modules/catapult.py
@@ -16,6 +16,12 @@ module: catapult
short_description: Send a sms / mms using the catapult bandwidth API
description:
- Allows notifications to be sent using SMS / MMS using the catapult bandwidth API.
+deprecated:
+ removed_in: 13.0.0
+ why: >-
+ DNS fails to resolve the API endpoint used by the module since Oct 2024.
+ See L(the associated issue, https://github.com/ansible-collections/community.general/issues/10318) for details.
+ alternative: There is none.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -89,14 +95,6 @@ EXAMPLES = r"""
api_secret: "{{ api_secret }}"
"""
-RETURN = r"""
-changed:
- description: Whether the API accepted the message.
- returned: always
- type: bool
- sample: true
-"""
-
import json
@@ -132,7 +130,7 @@ def main():
user_id=dict(required=True),
api_token=dict(required=True, no_log=True),
api_secret=dict(required=True, no_log=True),
- media=dict(default=None, required=False),
+ media=dict(),
),
)
diff --git a/plugins/modules/cisco_webex.py b/plugins/modules/cisco_webex.py
index 14b8716846..f957f4121d 100644
--- a/plugins/modules/cisco_webex.py
+++ b/plugins/modules/cisco_webex.py
@@ -177,7 +177,7 @@ def main():
argument_spec=dict(
recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
recipient_id=dict(required=True, no_log=True),
- msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
+ msg_type=dict(default='text', aliases=['message_type'], choices=['text', 'markdown']),
personal_token=dict(required=True, no_log=True, aliases=['token']),
msg=dict(required=True),
),
diff --git a/plugins/modules/clc_aa_policy.py b/plugins/modules/clc_aa_policy.py
deleted file mode 100644
index eb8c57f60c..0000000000
--- a/plugins/modules/clc_aa_policy.py
+++ /dev/null
@@ -1,338 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_aa_policy
-short_description: Create or Delete Anti-Affinity Policies at CenturyLink Cloud
-description:
- - An Ansible module to Create or Delete Anti-Affinity Policies at CenturyLink Cloud.
-extends_documentation_fragment:
- - community.general.attributes
- - community.general.clc
-author:
- - "CLC Runner (@clc-runner)"
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- name:
- description:
- - The name of the Anti-Affinity Policy.
- type: str
- required: true
- location:
- description:
- - Datacenter in which the policy lives/should live.
- type: str
- required: true
- state:
- description:
- - Whether to create or delete the policy.
- type: str
- required: false
- default: present
- choices: ['present', 'absent']
-"""
-
-EXAMPLES = r"""
-- name: Create AA Policy
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create an Anti Affinity Policy
- community.general.clc_aa_policy:
- name: Hammer Time
- location: UK3
- state: present
- register: policy
-
- - name: Debug
- ansible.builtin.debug:
- var: policy
-
-- name: Delete AA Policy
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Delete an Anti Affinity Policy
- community.general.clc_aa_policy:
- name: Hammer Time
- location: UK3
- state: absent
- register: policy
-
- - name: Debug
- ansible.builtin.debug:
- var: policy
-"""
-
-RETURN = r"""
-policy:
- description: The anti-affinity policy information.
- returned: success
- type: dict
- sample:
- {
- "id":"1a28dd0988984d87b9cd61fa8da15424",
- "name":"test_aa_policy",
- "location":"UC1",
- "links":[
- {
- "rel":"self",
- "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
- "verbs":[
- "GET",
- "DELETE",
- "PUT"
- ]
- },
- {
- "rel":"location",
- "href":"/v2/datacenters/wfad/UC1",
- "id":"uc1",
- "name":"UC1 - US West (Santa Clara)"
- }
- ]
- }
-"""
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk:
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcAntiAffinityPolicy:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- self.policy_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'),
- exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'),
- exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(required=True),
- location=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- )
- return argument_spec
-
- # Module Behavior Goodness
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
-
- self._set_clc_credentials_from_env()
- self.policy_dict = self._get_policies_for_datacenter(p)
-
- if p['state'] == "absent":
- changed, policy = self._ensure_policy_is_absent(p)
- else:
- changed, policy = self._ensure_policy_is_present(p)
-
- if hasattr(policy, 'data'):
- policy = policy.data
- elif hasattr(policy, '__dict__'):
- policy = policy.__dict__
-
- self.module.exit_json(changed=changed, policy=policy)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _get_policies_for_datacenter(self, p):
- """
- Get the Policies for a datacenter by calling the CLC API.
- :param p: datacenter to get policies from
- :return: policies in the datacenter
- """
- response = {}
-
- policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
-
- for policy in policies:
- response[policy.name] = policy
- return response
-
- def _create_policy(self, p):
- """
- Create an Anti Affinity Policy using the CLC API.
- :param p: datacenter to create policy in
- :return: response dictionary from the CLC API.
- """
- try:
- return self.clc.v2.AntiAffinity.Create(
- name=p['name'],
- location=p['location'])
- except CLCException as ex:
- self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
- p['name'], ex.response_text
- ))
-
- def _delete_policy(self, p):
- """
- Delete an Anti Affinity Policy using the CLC API.
- :param p: datacenter to delete a policy from
- :return: none
- """
- try:
- policy = self.policy_dict[p['name']]
- policy.Delete()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
- p['name'], ex.response_text
- ))
-
- def _policy_exists(self, policy_name):
- """
- Check to see if an Anti Affinity Policy exists
- :param policy_name: name of the policy
- :return: boolean of if the policy exists
- """
- if policy_name in self.policy_dict:
- return self.policy_dict.get(policy_name)
-
- return False
-
- def _ensure_policy_is_absent(self, p):
- """
- Makes sure that a policy is absent
- :param p: dictionary of policy name
- :return: tuple of if a deletion occurred and the name of the policy that was deleted
- """
- changed = False
- if self._policy_exists(policy_name=p['name']):
- changed = True
- if not self.module.check_mode:
- self._delete_policy(p)
- return changed, None
-
- def _ensure_policy_is_present(self, p):
- """
- Ensures that a policy is present
- :param p: dictionary of a policy name
- :return: tuple of if an addition occurred and the name of the policy that was added
- """
- changed = False
- policy = self._policy_exists(policy_name=p['name'])
- if not policy:
- changed = True
- policy = None
- if not self.module.check_mode:
- policy = self._create_policy(p)
- return changed, policy
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
- supports_check_mode=True)
- clc_aa_policy = ClcAntiAffinityPolicy(module)
- clc_aa_policy.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_alert_policy.py b/plugins/modules/clc_alert_policy.py
deleted file mode 100644
index 8075a8436c..0000000000
--- a/plugins/modules/clc_alert_policy.py
+++ /dev/null
@@ -1,522 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_alert_policy
-short_description: Create or Delete Alert Policies at CenturyLink Cloud
-description:
- - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
-deprecated:
- removed_in: 11.0.0
- why: >
- Lumen Public Cloud (formerly known as CenturyLink Cloud) has gone End-of-Life in September 2023.
- See more at U(https://www.ctl.io/knowledge-base/release-notes/2023/lumen-public-cloud-platform-end-of-life-notice/?).
- alternative: There is none.
-extends_documentation_fragment:
- - community.general.attributes
- - community.general.clc
-author:
- - "CLC Runner (@clc-runner)"
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- alias:
- description:
- - The alias of your CLC Account.
- type: str
- required: true
- name:
- description:
- - The name of the alert policy. This is mutually exclusive with O(id).
- type: str
- id:
- description:
- - The alert policy ID. This is mutually exclusive with O(name).
- type: str
- alert_recipients:
- description:
- - A list of recipient email IDs to notify the alert. This is required for O(state=present).
- type: list
- elements: str
- metric:
- description:
- - The metric on which to measure the condition that will trigger the alert. This is required for O(state=present).
- type: str
- choices: ['cpu', 'memory', 'disk']
- duration:
- description:
- - The length of time in minutes that the condition must exceed the threshold. This is required for O(state=present).
- type: str
- threshold:
- description:
- - The threshold that will trigger the alert when the metric equals or exceeds it. This is required for O(state=present).
- This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0.
- type: int
- state:
- description:
- - Whether to create or delete the policy.
- type: str
- default: present
- choices: ['present', 'absent']
-"""
-
-EXAMPLES = r"""
-- name: Create Alert Policy Example
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create an Alert Policy for disk above 80% for 5 minutes
- community.general.clc_alert_policy:
- alias: wfad
- name: 'alert for disk > 80%'
- alert_recipients:
- - test1@centurylink.com
- - test2@centurylink.com
- metric: 'disk'
- duration: '00:05:00'
- threshold: 80
- state: present
- register: policy
-
- - name: Debug
- ansible.builtin.debug: var=policy
-
-- name: Delete Alert Policy Example
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Delete an Alert Policy
- community.general.clc_alert_policy:
- alias: wfad
- name: 'alert for disk > 80%'
- state: absent
- register: policy
-
- - name: Debug
- ansible.builtin.debug: var=policy
-"""
-
-RETURN = r"""
-policy:
- description: The alert policy information.
- returned: success
- type: dict
- sample:
- {
- "actions": [
- {
- "action": "email",
- "settings": {
- "recipients": [
- "user1@domain.com",
- "user1@domain.com"
- ]
- }
- }
- ],
- "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
- "links": [
- {
- "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
- "rel": "self",
- "verbs": [
- "GET",
- "DELETE",
- "PUT"
- ]
- }
- ],
- "name": "test_alert",
- "triggers": [
- {
- "duration": "00:05:00",
- "metric": "disk",
- "threshold": 80.0
- }
- ]
- }
-"""
-
-__version__ = '${version}'
-
-import json
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcAlertPolicy:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- self.policy_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(),
- id=dict(),
- alias=dict(required=True),
- alert_recipients=dict(type='list', elements='str'),
- metric=dict(
- choices=[
- 'cpu',
- 'memory',
- 'disk']),
- duration=dict(type='str'),
- threshold=dict(type='int'),
- state=dict(default='present', choices=['present', 'absent'])
- )
- mutually_exclusive = [
- ['name', 'id']
- ]
- return {'argument_spec': argument_spec,
- 'mutually_exclusive': mutually_exclusive}
-
- # Module Behavior Goodness
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
-
- self._set_clc_credentials_from_env()
- self.policy_dict = self._get_alert_policies(p['alias'])
-
- if p['state'] == 'present':
- changed, policy = self._ensure_alert_policy_is_present()
- else:
- changed, policy = self._ensure_alert_policy_is_absent()
-
- self.module.exit_json(changed=changed, policy=policy)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _ensure_alert_policy_is_present(self):
- """
- Ensures that the alert policy is present
- :return: (changed, policy)
- changed: A flag representing if anything is modified
- policy: the created/updated alert policy
- """
- changed = False
- p = self.module.params
- policy_name = p.get('name')
-
- if not policy_name:
- self.module.fail_json(msg='Policy name is a required')
- policy = self._alert_policy_exists(policy_name)
- if not policy:
- changed = True
- policy = None
- if not self.module.check_mode:
- policy = self._create_alert_policy()
- else:
- changed_u, policy = self._ensure_alert_policy_is_updated(policy)
- if changed_u:
- changed = True
- return changed, policy
-
- def _ensure_alert_policy_is_absent(self):
- """
- Ensures that the alert policy is absent
- :return: (changed, None)
- changed: A flag representing if anything is modified
- """
- changed = False
- p = self.module.params
- alert_policy_id = p.get('id')
- alert_policy_name = p.get('name')
- alias = p.get('alias')
- if not alert_policy_id and not alert_policy_name:
- self.module.fail_json(
- msg='Either alert policy id or policy name is required')
- if not alert_policy_id and alert_policy_name:
- alert_policy_id = self._get_alert_policy_id(
- self.module,
- alert_policy_name)
- if alert_policy_id and alert_policy_id in self.policy_dict:
- changed = True
- if not self.module.check_mode:
- self._delete_alert_policy(alias, alert_policy_id)
- return changed, None
-
- def _ensure_alert_policy_is_updated(self, alert_policy):
- """
- Ensures the alert policy is updated if anything is changed in the alert policy configuration
- :param alert_policy: the target alert policy
- :return: (changed, policy)
- changed: A flag representing if anything is modified
- policy: the updated the alert policy
- """
- changed = False
- p = self.module.params
- alert_policy_id = alert_policy.get('id')
- email_list = p.get('alert_recipients')
- metric = p.get('metric')
- duration = p.get('duration')
- threshold = p.get('threshold')
- policy = alert_policy
- if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
- (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
- (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
- changed = True
- elif email_list:
- t_email_list = list(
- alert_policy.get('actions')[0].get('settings').get('recipients'))
- if set(email_list) != set(t_email_list):
- changed = True
- if changed and not self.module.check_mode:
- policy = self._update_alert_policy(alert_policy_id)
- return changed, policy
-
- def _get_alert_policies(self, alias):
- """
- Get the alert policies for account alias by calling the CLC API.
- :param alias: the account alias
- :return: the alert policies for the account alias
- """
- response = {}
-
- policies = self.clc.v2.API.Call('GET',
- '/v2/alertPolicies/%s'
- % alias)
-
- for policy in policies.get('items'):
- response[policy.get('id')] = policy
- return response
-
- def _create_alert_policy(self):
- """
- Create an alert Policy using the CLC API.
- :return: response dictionary from the CLC API.
- """
- p = self.module.params
- alias = p['alias']
- email_list = p['alert_recipients']
- metric = p['metric']
- duration = p['duration']
- threshold = p['threshold']
- policy_name = p['name']
- arguments = json.dumps(
- {
- 'name': policy_name,
- 'actions': [{
- 'action': 'email',
- 'settings': {
- 'recipients': email_list
- }
- }],
- 'triggers': [{
- 'metric': metric,
- 'duration': duration,
- 'threshold': threshold
- }]
- }
- )
- try:
- result = self.clc.v2.API.Call(
- 'POST',
- '/v2/alertPolicies/%s' % alias,
- arguments)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg='Unable to create alert policy "{0}". {1}'.format(
- policy_name, str(e.response_text)))
- return result
-
- def _update_alert_policy(self, alert_policy_id):
- """
- Update alert policy using the CLC API.
- :param alert_policy_id: The clc alert policy id
- :return: response dictionary from the CLC API.
- """
- p = self.module.params
- alias = p['alias']
- email_list = p['alert_recipients']
- metric = p['metric']
- duration = p['duration']
- threshold = p['threshold']
- policy_name = p['name']
- arguments = json.dumps(
- {
- 'name': policy_name,
- 'actions': [{
- 'action': 'email',
- 'settings': {
- 'recipients': email_list
- }
- }],
- 'triggers': [{
- 'metric': metric,
- 'duration': duration,
- 'threshold': threshold
- }]
- }
- )
- try:
- result = self.clc.v2.API.Call(
- 'PUT', '/v2/alertPolicies/%s/%s' %
- (alias, alert_policy_id), arguments)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg='Unable to update alert policy "{0}". {1}'.format(
- policy_name, str(e.response_text)))
- return result
-
- def _delete_alert_policy(self, alias, policy_id):
- """
- Delete an alert policy using the CLC API.
- :param alias : the account alias
- :param policy_id: the alert policy id
- :return: response dictionary from the CLC API.
- """
- try:
- result = self.clc.v2.API.Call(
- 'DELETE', '/v2/alertPolicies/%s/%s' %
- (alias, policy_id), None)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg='Unable to delete alert policy id "{0}". {1}'.format(
- policy_id, str(e.response_text)))
- return result
-
- def _alert_policy_exists(self, policy_name):
- """
- Check to see if an alert policy exists
- :param policy_name: name of the alert policy
- :return: boolean of if the policy exists
- """
- result = False
- for policy_id in self.policy_dict:
- if self.policy_dict.get(policy_id).get('name') == policy_name:
- result = self.policy_dict.get(policy_id)
- return result
-
- def _get_alert_policy_id(self, module, alert_policy_name):
- """
- retrieves the alert policy id of the account based on the name of the policy
- :param module: the AnsibleModule object
- :param alert_policy_name: the alert policy name
- :return: alert_policy_id: The alert policy id
- """
- alert_policy_id = None
- for policy_id in self.policy_dict:
- if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
- if not alert_policy_id:
- alert_policy_id = policy_id
- else:
- return module.fail_json(
- msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
- return alert_policy_id
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- argument_dict = ClcAlertPolicy._define_module_argument_spec()
- module = AnsibleModule(supports_check_mode=True, **argument_dict)
- clc_alert_policy = ClcAlertPolicy(module)
- clc_alert_policy.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_blueprint_package.py b/plugins/modules/clc_blueprint_package.py
deleted file mode 100644
index 2012c0fba3..0000000000
--- a/plugins/modules/clc_blueprint_package.py
+++ /dev/null
@@ -1,299 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_blueprint_package
-short_description: Deploys a blue print package on a set of servers in CenturyLink Cloud
-description:
- - An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud.
-deprecated:
- removed_in: 11.0.0
- why: >
- Lumen Public Cloud (formerly known as CenturyLink Cloud) has gone End-of-Life in September 2023.
- See more at U(https://www.ctl.io/knowledge-base/release-notes/2023/lumen-public-cloud-platform-end-of-life-notice/?).
- alternative: There is none.
-extends_documentation_fragment:
- - community.general.attributes
- - community.general.clc
-author:
- - "CLC Runner (@clc-runner)"
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- server_ids:
- description:
- - A list of server IDs to deploy the blue print package.
- type: list
- required: true
- elements: str
- package_id:
- description:
- - The package ID of the blue print.
- type: str
- required: true
- package_params:
- description:
- - The dictionary of arguments required to deploy the blue print.
- type: dict
- default: {}
- required: false
- state:
- description:
- - Whether to install or uninstall the package. Currently it supports only V(present) for install action.
- type: str
- required: false
- default: present
- choices: ['present']
- wait:
- description:
- - Whether to wait for the tasks to finish before returning.
- type: str
- default: 'True'
- required: false
-"""
-
-EXAMPLES = r"""
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
-- name: Deploy package
- community.general.clc_blueprint_package:
- server_ids:
- - UC1TEST-SERVER1
- - UC1TEST-SERVER2
- package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
- package_params: {}
-"""
-
-RETURN = r"""
-server_ids:
- description: The list of server IDs that are changed.
- returned: success
- type: list
- sample: ["UC1TEST-SERVER1", "UC1TEST-SERVER2"]
-"""
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcBlueprintPackage:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
- changed = False
- changed_server_ids = []
- self._set_clc_credentials_from_env()
- server_ids = p['server_ids']
- package_id = p['package_id']
- package_params = p['package_params']
- state = p['state']
- if state == 'present':
- changed, changed_server_ids, request_list = self.ensure_package_installed(
- server_ids, package_id, package_params)
- self._wait_for_requests_to_complete(request_list)
- self.module.exit_json(changed=changed, server_ids=changed_server_ids)
-
- @staticmethod
- def define_argument_spec():
- """
- This function defines the dictionary object required for
- package module
- :return: the package dictionary object
- """
- argument_spec = dict(
- server_ids=dict(type='list', elements='str', required=True),
- package_id=dict(required=True),
- package_params=dict(type='dict', default={}),
- wait=dict(default=True), # @FIXME should be bool?
- state=dict(default='present', choices=['present'])
- )
- return argument_spec
-
- def ensure_package_installed(self, server_ids, package_id, package_params):
- """
- Ensure the package is installed in the given list of servers
- :param server_ids: the server list where the package needs to be installed
- :param package_id: the blueprint package id
- :param package_params: the package arguments
- :return: (changed, server_ids, request_list)
- changed: A flag indicating if a change was made
- server_ids: The list of servers modified
- request_list: The list of request objects from clc-sdk
- """
- changed = False
- request_list = []
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to get servers from CLC')
- for server in servers:
- if not self.module.check_mode:
- request = self.clc_install_package(
- server,
- package_id,
- package_params)
- request_list.append(request)
- changed = True
- return changed, server_ids, request_list
-
- def clc_install_package(self, server, package_id, package_params):
- """
- Install the package to a given clc server
- :param server: The server object where the package needs to be installed
- :param package_id: The blue print package id
- :param package_params: the required argument dict for the package installation
- :return: The result object from the CLC API call
- """
- result = None
- try:
- result = server.ExecutePackage(
- package_id=package_id,
- parameters=package_params)
- except CLCException as ex:
- self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
- package_id, server.id, ex.message
- ))
- return result
-
- def _wait_for_requests_to_complete(self, request_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param request_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in request_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process package install request')
-
- def _get_servers_from_clc(self, server_list, message):
- """
- Internal function to fetch list of CLC server objects from a list of server ids
- :param server_list: the list of server ids
- :param message: the error message to raise if there is any error
- :return the list of CLC server objects
- """
- try:
- return self.clc.v2.Servers(server_list).servers
- except CLCException as ex:
- self.module.fail_json(msg=message + ': %s' % ex)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- Main function
- :return: None
- """
- module = AnsibleModule(
- argument_spec=ClcBlueprintPackage.define_argument_spec(),
- supports_check_mode=True
- )
- clc_blueprint_package = ClcBlueprintPackage(module)
- clc_blueprint_package.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_firewall_policy.py b/plugins/modules/clc_firewall_policy.py
deleted file mode 100644
index 37672df7f5..0000000000
--- a/plugins/modules/clc_firewall_policy.py
+++ /dev/null
@@ -1,586 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_firewall_policy
-short_description: Create/delete/update firewall policies
-description:
- - Create or delete or update firewall policies on Centurylink Cloud.
-deprecated:
- removed_in: 11.0.0
- why: >
- Lumen Public Cloud (formerly known as CenturyLink Cloud) has gone End-of-Life in September 2023.
- See more at U(https://www.ctl.io/knowledge-base/release-notes/2023/lumen-public-cloud-platform-end-of-life-notice/?).
- alternative: There is none.
-extends_documentation_fragment:
- - community.general.attributes
- - community.general.clc
-author:
- - "CLC Runner (@clc-runner)"
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- location:
- description:
- - Target datacenter for the firewall policy.
- type: str
- required: true
- state:
- description:
- - Whether to create or delete the firewall policy.
- type: str
- default: present
- choices: ['present', 'absent']
- source:
- description:
- - The list of source addresses for traffic on the originating firewall. This is required when O(state=present).
- type: list
- elements: str
- destination:
- description:
- - The list of destination addresses for traffic on the terminating firewall. This is required when O(state=present).
- type: list
- elements: str
- ports:
- description:
- - The list of ports associated with the policy. TCP and UDP can take in single ports or port ranges.
- - "Example: V(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
- type: list
- elements: str
- firewall_policy_id:
- description:
- - ID of the firewall policy. This is required to update or delete an existing firewall policy.
- type: str
- source_account_alias:
- description:
- - CLC alias for the source account.
- type: str
- required: true
- destination_account_alias:
- description:
- - CLC alias for the destination account.
- type: str
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- type: str
- default: 'True'
- enabled:
- description:
- - Whether the firewall policy is enabled or disabled.
- type: str
- choices: ['True', 'False']
- default: 'True'
-"""
-
-EXAMPLES = r"""
-- name: Create Firewall Policy
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create / Verify an Firewall Policy at CenturyLink Cloud
- clc_firewall:
- source_account_alias: WFAD
- location: VA1
- state: present
- source: 10.128.216.0/24
- destination: 10.128.216.0/24
- ports: Any
- destination_account_alias: WFAD
-
-- name: Delete Firewall Policy
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Delete an Firewall Policy at CenturyLink Cloud
- clc_firewall:
- source_account_alias: WFAD
- location: VA1
- state: absent
- firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
-"""
-
-RETURN = r"""
-firewall_policy_id:
- description: The firewall policy ID.
- returned: success
- type: str
- sample: fc36f1bfd47242e488a9c44346438c05
-firewall_policy:
- description: The firewall policy information.
- returned: success
- type: dict
- sample:
- {
- "destination":[
- "10.1.1.0/24",
- "10.2.2.0/24"
- ],
- "destinationAccount":"wfad",
- "enabled":true,
- "id":"fc36f1bfd47242e488a9c44346438c05",
- "links":[
- {
- "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
- "rel":"self",
- "verbs":[
- "GET",
- "PUT",
- "DELETE"
- ]
- }
- ],
- "ports":[
- "any"
- ],
- "source":[
- "10.1.1.0/24",
- "10.2.2.0/24"
- ],
- "status":"active"
- }
-"""
-
-__version__ = '${version}'
-
-import os
-import traceback
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from time import sleep
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcFirewallPolicy:
-
- clc = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.firewall_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- location=dict(required=True),
- source_account_alias=dict(required=True),
- destination_account_alias=dict(),
- firewall_policy_id=dict(),
- ports=dict(type='list', elements='str'),
- source=dict(type='list', elements='str'),
- destination=dict(type='list', elements='str'),
- wait=dict(default=True), # @FIXME type=bool
- state=dict(default='present', choices=['present', 'absent']),
- enabled=dict(default=True, choices=[True, False])
- )
- return argument_spec
-
- def process_request(self):
- """
- Execute the main code path, and handle the request
- :return: none
- """
- changed = False
- firewall_policy = None
- location = self.module.params.get('location')
- source_account_alias = self.module.params.get('source_account_alias')
- destination_account_alias = self.module.params.get(
- 'destination_account_alias')
- firewall_policy_id = self.module.params.get('firewall_policy_id')
- ports = self.module.params.get('ports')
- source = self.module.params.get('source')
- destination = self.module.params.get('destination')
- wait = self.module.params.get('wait')
- state = self.module.params.get('state')
- enabled = self.module.params.get('enabled')
-
- self.firewall_dict = {
- 'location': location,
- 'source_account_alias': source_account_alias,
- 'destination_account_alias': destination_account_alias,
- 'firewall_policy_id': firewall_policy_id,
- 'ports': ports,
- 'source': source,
- 'destination': destination,
- 'wait': wait,
- 'state': state,
- 'enabled': enabled}
-
- self._set_clc_credentials_from_env()
-
- if state == 'absent':
- changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
- source_account_alias, location, self.firewall_dict)
-
- elif state == 'present':
- changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
- source_account_alias, location, self.firewall_dict)
-
- return self.module.exit_json(
- changed=changed,
- firewall_policy_id=firewall_policy_id,
- firewall_policy=firewall_policy)
-
- @staticmethod
- def _get_policy_id_from_response(response):
- """
- Method to parse out the policy id from creation response
- :param response: response from firewall creation API call
- :return: policy_id: firewall policy id from creation call
- """
- url = response.get('links')[0]['href']
- path = urlparse(url).path
- path_list = os.path.split(path)
- policy_id = path_list[-1]
- return policy_id
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _ensure_firewall_policy_is_present(
- self,
- source_account_alias,
- location,
- firewall_dict):
- """
- Ensures that a given firewall policy is present
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: (changed, firewall_policy_id, firewall_policy)
- changed: flag for if a change occurred
- firewall_policy_id: the firewall policy id that was created/updated
- firewall_policy: The firewall_policy object
- """
- firewall_policy = None
- firewall_policy_id = firewall_dict.get('firewall_policy_id')
-
- if firewall_policy_id is None:
- if not self.module.check_mode:
- response = self._create_firewall_policy(
- source_account_alias,
- location,
- firewall_dict)
- firewall_policy_id = self._get_policy_id_from_response(
- response)
- changed = True
- else:
- firewall_policy = self._get_firewall_policy(
- source_account_alias, location, firewall_policy_id)
- if not firewall_policy:
- return self.module.fail_json(
- msg='Unable to find the firewall policy id : {0}'.format(
- firewall_policy_id))
- changed = self._compare_get_request_with_dict(
- firewall_policy,
- firewall_dict)
- if not self.module.check_mode and changed:
- self._update_firewall_policy(
- source_account_alias,
- location,
- firewall_policy_id,
- firewall_dict)
- if changed and firewall_policy_id:
- firewall_policy = self._wait_for_requests_to_complete(
- source_account_alias,
- location,
- firewall_policy_id)
- return changed, firewall_policy_id, firewall_policy
-
- def _ensure_firewall_policy_is_absent(
- self,
- source_account_alias,
- location,
- firewall_dict):
- """
- Ensures that a given firewall policy is removed if present
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_dict: firewall policy to delete
- :return: (changed, firewall_policy_id, response)
- changed: flag for if a change occurred
- firewall_policy_id: the firewall policy id that was deleted
- response: response from CLC API call
- """
- changed = False
- response = []
- firewall_policy_id = firewall_dict.get('firewall_policy_id')
- result = self._get_firewall_policy(
- source_account_alias, location, firewall_policy_id)
- if result:
- if not self.module.check_mode:
- response = self._delete_firewall_policy(
- source_account_alias,
- location,
- firewall_policy_id)
- changed = True
- return changed, firewall_policy_id, response
-
- def _create_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_dict):
- """
- Creates the firewall policy for the given account alias
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: response from CLC API call
- """
- payload = {
- 'destinationAccount': firewall_dict.get('destination_account_alias'),
- 'source': firewall_dict.get('source'),
- 'destination': firewall_dict.get('destination'),
- 'ports': firewall_dict.get('ports')}
- try:
- response = self.clc.v2.API.Call(
- 'POST', '/v2-experimental/firewallPolicies/%s/%s' %
- (source_account_alias, location), payload)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg="Unable to create firewall policy. %s" %
- str(e.response_text))
- return response
-
- def _delete_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_policy_id):
- """
- Deletes a given firewall policy for an account alias in a datacenter
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: firewall policy id to delete
- :return: response: response from CLC API call
- """
- try:
- response = self.clc.v2.API.Call(
- 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
- (source_account_alias, location, firewall_policy_id))
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg="Unable to delete the firewall policy id : {0}. {1}".format(
- firewall_policy_id, str(e.response_text)))
- return response
-
- def _update_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_policy_id,
- firewall_dict):
- """
- Updates a firewall policy for a given datacenter and account alias
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: firewall policy id to update
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: response: response from CLC API call
- """
- try:
- response = self.clc.v2.API.Call(
- 'PUT',
- '/v2-experimental/firewallPolicies/%s/%s/%s' %
- (source_account_alias,
- location,
- firewall_policy_id),
- firewall_dict)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg="Unable to update the firewall policy id : {0}. {1}".format(
- firewall_policy_id, str(e.response_text)))
- return response
-
- @staticmethod
- def _compare_get_request_with_dict(response, firewall_dict):
- """
- Helper method to compare the json response for getting the firewall policy with the request parameters
- :param response: response from the get method
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: changed: Boolean that returns true if there are differences between
- the response parameters and the playbook parameters
- """
-
- changed = False
-
- response_dest_account_alias = response.get('destinationAccount')
- response_enabled = response.get('enabled')
- response_source = response.get('source')
- response_dest = response.get('destination')
- response_ports = response.get('ports')
- request_dest_account_alias = firewall_dict.get(
- 'destination_account_alias')
- request_enabled = firewall_dict.get('enabled')
- if request_enabled is None:
- request_enabled = True
- request_source = firewall_dict.get('source')
- request_dest = firewall_dict.get('destination')
- request_ports = firewall_dict.get('ports')
-
- if (
- response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
- response_enabled != request_enabled) or (
- response_source and response_source != request_source) or (
- response_dest and response_dest != request_dest) or (
- response_ports and response_ports != request_ports):
- changed = True
- return changed
-
- def _get_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_policy_id):
- """
- Get back details for a particular firewall policy
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: id of the firewall policy to get
- :return: response - The response from CLC API call
- """
- response = None
- try:
- response = self.clc.v2.API.Call(
- 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
- (source_account_alias, location, firewall_policy_id))
- except APIFailedResponse as e:
- if e.response_status_code != 404:
- self.module.fail_json(
- msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
- firewall_policy_id, str(e.response_text)))
- return response
-
- def _wait_for_requests_to_complete(
- self,
- source_account_alias,
- location,
- firewall_policy_id,
- wait_limit=50):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param source_account_alias: The source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: The firewall policy id
- :param wait_limit: The number of times to check the status for completion
- :return: the firewall_policy object
- """
- wait = self.module.params.get('wait')
- count = 0
- firewall_policy = None
- while wait:
- count += 1
- firewall_policy = self._get_firewall_policy(
- source_account_alias, location, firewall_policy_id)
- status = firewall_policy.get('status')
- if status == 'active' or count > wait_limit:
- wait = False
- else:
- # wait for 2 seconds
- sleep(2)
- return firewall_policy
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
- supports_check_mode=True)
-
- clc_firewall = ClcFirewallPolicy(module)
- clc_firewall.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_group.py b/plugins/modules/clc_group.py
deleted file mode 100644
index 967596ed3e..0000000000
--- a/plugins/modules/clc_group.py
+++ /dev/null
@@ -1,512 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_group
-short_description: Create/delete Server Groups at Centurylink Cloud
-description:
- - Create or delete Server Groups at Centurylink Centurylink Cloud.
-deprecated:
- removed_in: 11.0.0
- why: >
- Lumen Public Cloud (formerly known as CenturyLink Cloud) has gone End-of-Life in September 2023.
- See more at U(https://www.ctl.io/knowledge-base/release-notes/2023/lumen-public-cloud-platform-end-of-life-notice/?).
- alternative: There is none.
-extends_documentation_fragment:
- - community.general.attributes
- - community.general.clc
-author:
- - "CLC Runner (@clc-runner)"
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- name:
- description:
- - The name of the Server Group.
- type: str
- required: true
- description:
- description:
- - A description of the Server Group.
- type: str
- required: false
- parent:
- description:
- - The parent group of the server group. If parent is not provided, it creates the group at top level.
- type: str
- required: false
- location:
- description:
- - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter associated
- with the account.
- type: str
- required: false
- state:
- description:
- - Whether to create or delete the group.
- type: str
- default: present
- choices: ['present', 'absent']
- wait:
- description:
- - Whether to wait for the tasks to finish before returning.
- type: bool
- default: true
- required: false
-"""
-
-EXAMPLES = r"""
-# Create a Server Group
-- name: Create Server Group
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create / Verify a Server Group at CenturyLink Cloud
- community.general.clc_group:
- name: My Cool Server Group
- parent: Default Group
- state: present
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-
-# Delete a Server Group
-- name: Delete Server Group
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
- community.general.clc_group:
- name: My Cool Server Group
- parent: Default Group
- state: absent
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-"""
-
-RETURN = r"""
-group:
- description: The group information.
- returned: success
- type: dict
- sample:
- {
- "changeInfo":{
- "createdBy":"service.wfad",
- "createdDate":"2015-07-29T18:52:47Z",
- "modifiedBy":"service.wfad",
- "modifiedDate":"2015-07-29T18:52:47Z"
- },
- "customFields":[
-
- ],
- "description":"test group",
- "groups":[
-
- ],
- "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
- "links":[
- {
- "href":"/v2/groups/wfad",
- "rel":"createGroup",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad",
- "rel":"createServer",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
- "rel":"self",
- "verbs":[
- "GET",
- "PATCH",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"086ac1dfe0b6411989e8d1b77c4065f0",
- "rel":"parentGroup"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
- "rel":"defaults",
- "verbs":[
- "GET",
- "POST"
- ]
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
- "rel":"billing"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
- "rel":"archiveGroupAction"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
- "rel":"statistics"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
- "rel":"upcomingScheduledActivities"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
- "rel":"horizontalAutoscalePolicyMapping",
- "verbs":[
- "GET",
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
- "rel":"scheduledActivities",
- "verbs":[
- "GET",
- "POST"
- ]
- }
- ],
- "locationId":"UC1",
- "name":"test group",
- "status":"active",
- "type":"default"
- }
-"""
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcGroup(object):
-
- clc = None
- root_group = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.group_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Execute the main code path, and handle the request
- :return: none
- """
- location = self.module.params.get('location')
- group_name = self.module.params.get('name')
- parent_name = self.module.params.get('parent')
- group_description = self.module.params.get('description')
- state = self.module.params.get('state')
-
- self._set_clc_credentials_from_env()
- self.group_dict = self._get_group_tree_for_datacenter(
- datacenter=location)
-
- if state == "absent":
- changed, group, requests = self._ensure_group_is_absent(
- group_name=group_name, parent_name=parent_name)
- if requests:
- self._wait_for_requests_to_complete(requests)
- else:
- changed, group = self._ensure_group_is_present(
- group_name=group_name, parent_name=parent_name, group_description=group_description)
- try:
- group = group.data
- except AttributeError:
- group = group_name
- self.module.exit_json(changed=changed, group=group)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(required=True),
- description=dict(),
- parent=dict(),
- location=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- wait=dict(type='bool', default=True))
-
- return argument_spec
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _ensure_group_is_absent(self, group_name, parent_name):
- """
- Ensure that group_name is absent by deleting it if necessary
- :param group_name: string - the name of the clc server group to delete
- :param parent_name: string - the name of the parent group for group_name
- :return: changed, group
- """
- changed = False
- group = []
- results = []
-
- if self._group_exists(group_name=group_name, parent_name=parent_name):
- if not self.module.check_mode:
- group.append(group_name)
- result = self._delete_group(group_name)
- results.append(result)
- changed = True
- return changed, group, results
-
- def _delete_group(self, group_name):
- """
- Delete the provided server group
- :param group_name: string - the server group to delete
- :return: none
- """
- response = None
- group, parent = self.group_dict.get(group_name)
- try:
- response = group.Delete()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
- group_name, ex.response_text
- ))
- return response
-
- def _ensure_group_is_present(
- self,
- group_name,
- parent_name,
- group_description):
- """
- Checks to see if a server group exists, creates it if it doesn't.
- :param group_name: the name of the group to validate/create
- :param parent_name: the name of the parent group for group_name
- :param group_description: a short description of the server group (used when creating)
- :return: (changed, group) -
- changed: Boolean- whether a change was made,
- group: A clc group object for the group
- """
- if not self.root_group:
- raise AssertionError("Implementation Error: Root Group not set")
- parent = parent_name if parent_name is not None else self.root_group.name
- description = group_description
- changed = False
- group = group_name
-
- parent_exists = self._group_exists(group_name=parent, parent_name=None)
- child_exists = self._group_exists(
- group_name=group_name,
- parent_name=parent)
-
- if parent_exists and child_exists:
- group, parent = self.group_dict[group_name]
- changed = False
- elif parent_exists and not child_exists:
- if not self.module.check_mode:
- group = self._create_group(
- group=group,
- parent=parent,
- description=description)
- changed = True
- else:
- self.module.fail_json(
- msg="parent group: " +
- parent +
- " does not exist")
-
- return changed, group
-
- def _create_group(self, group, parent, description):
- """
- Create the provided server group
- :param group: clc_sdk.Group - the group to create
- :param parent: clc_sdk.Parent - the parent group for {group}
- :param description: string - a text description of the group
- :return: clc_sdk.Group - the created group
- """
- response = None
- (parent, grandparent) = self.group_dict[parent]
- try:
- response = parent.Create(name=group, description=description)
- except CLCException as ex:
- self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
- group, ex.response_text))
- return response
-
- def _group_exists(self, group_name, parent_name):
- """
- Check to see if a group exists
- :param group_name: string - the group to check
- :param parent_name: string - the parent of group_name
- :return: boolean - whether the group exists
- """
- result = False
- if group_name in self.group_dict:
- (group, parent) = self.group_dict[group_name]
- if parent_name is None or parent_name == parent.name:
- result = True
- return result
-
- def _get_group_tree_for_datacenter(self, datacenter=None):
- """
- Walk the tree of groups for a datacenter
- :param datacenter: string - the datacenter to walk (ex: 'UC1')
- :return: a dictionary of groups and parents
- """
- self.root_group = self.clc.v2.Datacenter(
- location=datacenter).RootGroup()
- return self._walk_groups_recursive(
- parent_group=None,
- child_group=self.root_group)
-
- def _walk_groups_recursive(self, parent_group, child_group):
- """
- Walk a parent-child tree of groups, starting with the provided child group
- :param parent_group: clc_sdk.Group - the parent group to start the walk
- :param child_group: clc_sdk.Group - the child group to start the walk
- :return: a dictionary of groups and parents
- """
- result = {str(child_group): (child_group, parent_group)}
- groups = child_group.Subgroups().groups
- if len(groups) > 0:
- for group in groups:
- if group.type != 'default':
- continue
-
- result.update(self._walk_groups_recursive(child_group, group))
- return result
-
- def _wait_for_requests_to_complete(self, requests_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param requests_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in requests_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process group request')
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcGroup._define_module_argument_spec(),
- supports_check_mode=True)
-
- clc_group = ClcGroup(module)
- clc_group.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_loadbalancer.py b/plugins/modules/clc_loadbalancer.py
deleted file mode 100644
index b7db65136d..0000000000
--- a/plugins/modules/clc_loadbalancer.py
+++ /dev/null
@@ -1,938 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_loadbalancer
-short_description: Create, Delete shared loadbalancers in CenturyLink Cloud
-description:
- - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud.
-deprecated:
- removed_in: 11.0.0
- why: >
- Lumen Public Cloud (formerly known as CenturyLink Cloud) has gone End-of-Life in September 2023.
- See more at U(https://www.ctl.io/knowledge-base/release-notes/2023/lumen-public-cloud-platform-end-of-life-notice/?).
- alternative: There is none.
-extends_documentation_fragment:
- - community.general.attributes
- - community.general.clc
-author:
- - "CLC Runner (@clc-runner)"
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- name:
- description:
- - The name of the loadbalancer.
- type: str
- required: true
- description:
- description:
- - A description for the loadbalancer.
- type: str
- alias:
- description:
- - The alias of your CLC Account.
- type: str
- required: true
- location:
- description:
- - The location of the datacenter where the load balancer resides in.
- type: str
- required: true
- method:
- description:
- - The balancing method for the load balancer pool.
- type: str
- choices: ['leastConnection', 'roundRobin']
- persistence:
- description:
- - The persistence method for the load balancer.
- type: str
- choices: ['standard', 'sticky']
- port:
- description:
- - Port to configure on the public-facing side of the load balancer pool.
- type: str
- choices: ['80', '443']
- nodes:
- description:
- - A list of nodes that needs to be added to the load balancer pool.
- type: list
- default: []
- elements: dict
- status:
- description:
- - The status of the loadbalancer.
- type: str
- default: enabled
- choices: ['enabled', 'disabled']
- state:
- description:
- - Whether to create or delete the load balancer pool.
- type: str
- default: present
- choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
-"""
-
-EXAMPLES = r"""
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-- name: Create Loadbalancer
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Create things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.123
- privatePort: 80
- state: present
-
-- name: Add node to an existing loadbalancer pool
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Create things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.234
- privatePort: 80
- state: nodes_present
-
-- name: Remove node from an existing loadbalancer pool
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Create things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.234
- privatePort: 80
- state: nodes_absent
-
-- name: Delete LoadbalancerPool
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Delete things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.123
- privatePort: 80
- state: port_absent
-
-- name: Delete Loadbalancer
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Delete things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.123
- privatePort: 80
- state: absent
-"""
-
-RETURN = r"""
-loadbalancer:
- description: The load balancer result object from CLC.
- returned: success
- type: dict
- sample:
- {
- "description":"test-lb",
- "id":"ab5b18cb81e94ab9925b61d1ca043fb5",
- "ipAddress":"66.150.174.197",
- "links":[
- {
- "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
- "rel":"self",
- "verbs":[
- "GET",
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
- "rel":"pools",
- "verbs":[
- "GET",
- "POST"
- ]
- }
- ],
- "name":"test-lb",
- "pools":[
-
- ],
- "status":"enabled"
- }
-"""
-
-__version__ = '${version}'
-
-import json
-import os
-import traceback
-from time import sleep
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcLoadBalancer:
-
- clc = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.lb_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Execute the main code path, and handle the request
- :return: none
- """
- changed = False
- result_lb = None
- loadbalancer_name = self.module.params.get('name')
- loadbalancer_alias = self.module.params.get('alias')
- loadbalancer_location = self.module.params.get('location')
- loadbalancer_description = self.module.params.get('description')
- loadbalancer_port = self.module.params.get('port')
- loadbalancer_method = self.module.params.get('method')
- loadbalancer_persistence = self.module.params.get('persistence')
- loadbalancer_nodes = self.module.params.get('nodes')
- loadbalancer_status = self.module.params.get('status')
- state = self.module.params.get('state')
-
- if loadbalancer_description is None:
- loadbalancer_description = loadbalancer_name
-
- self._set_clc_credentials_from_env()
-
- self.lb_dict = self._get_loadbalancer_list(
- alias=loadbalancer_alias,
- location=loadbalancer_location)
-
- if state == 'present':
- changed, result_lb, lb_id = self.ensure_loadbalancer_present(
- name=loadbalancer_name,
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- description=loadbalancer_description,
- status=loadbalancer_status)
- if loadbalancer_port:
- changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
- lb_id=lb_id,
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- method=loadbalancer_method,
- persistence=loadbalancer_persistence,
- port=loadbalancer_port)
-
- if loadbalancer_nodes:
- changed, result_nodes = self.ensure_lbpool_nodes_set(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port,
- nodes=loadbalancer_nodes)
- elif state == 'absent':
- changed, result_lb = self.ensure_loadbalancer_absent(
- name=loadbalancer_name,
- alias=loadbalancer_alias,
- location=loadbalancer_location)
-
- elif state == 'port_absent':
- changed, result_lb = self.ensure_loadbalancerpool_absent(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port)
-
- elif state == 'nodes_present':
- changed, result_lb = self.ensure_lbpool_nodes_present(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port,
- nodes=loadbalancer_nodes)
-
- elif state == 'nodes_absent':
- changed, result_lb = self.ensure_lbpool_nodes_absent(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port,
- nodes=loadbalancer_nodes)
-
- self.module.exit_json(changed=changed, loadbalancer=result_lb)
-
- def ensure_loadbalancer_present(
- self, name, alias, location, description, status):
- """
- Checks to see if a load balancer exists and creates one if it does not.
- :param name: Name of loadbalancer
- :param alias: Alias of account
- :param location: Datacenter
- :param description: Description of loadbalancer
- :param status: Enabled / Disabled
- :return: (changed, result, lb_id)
- changed: Boolean whether a change was made
- result: The result object from the CLC load balancer request
- lb_id: The load balancer id
- """
- changed = False
- result = name
- lb_id = self._loadbalancer_exists(name=name)
- if not lb_id:
- if not self.module.check_mode:
- result = self.create_loadbalancer(name=name,
- alias=alias,
- location=location,
- description=description,
- status=status)
- lb_id = result.get('id')
- changed = True
-
- return changed, result, lb_id
-
- def ensure_loadbalancerpool_present(
- self, lb_id, alias, location, method, persistence, port):
- """
- Checks to see if a load balancer pool exists and creates one if it does not.
- :param lb_id: The loadbalancer id
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param method: the load balancing method
- :param persistence: the load balancing persistence type
- :param port: the port that the load balancer will listen on
- :return: (changed, group, pool_id) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- pool_id: The string id of the load balancer pool
- """
- changed = False
- result = port
- if not lb_id:
- return changed, None, None
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if not pool_id:
- if not self.module.check_mode:
- result = self.create_loadbalancerpool(
- alias=alias,
- location=location,
- lb_id=lb_id,
- method=method,
- persistence=persistence,
- port=port)
- pool_id = result.get('id')
- changed = True
-
- return changed, result, pool_id
-
- def ensure_loadbalancer_absent(self, name, alias, location):
- """
- Checks to see if a load balancer exists and deletes it if it does
- :param name: Name of the load balancer
- :param alias: Alias of account
- :param location: Datacenter
- :return: (changed, result)
- changed: Boolean whether a change was made
- result: The result from the CLC API Call
- """
- changed = False
- result = name
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- if not self.module.check_mode:
- result = self.delete_loadbalancer(alias=alias,
- location=location,
- name=name)
- changed = True
- return changed, result
-
- def ensure_loadbalancerpool_absent(self, alias, location, name, port):
- """
- Checks to see if a load balancer pool exists and deletes it if it does
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer listens on
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- result = None
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- changed = True
- if not self.module.check_mode:
- result = self.delete_loadbalancerpool(
- alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id)
- else:
- result = "Pool doesn't exist"
- else:
- result = "LB Doesn't Exist"
- return changed, result
-
- def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
- """
- Checks to see if the provided list of nodes exist for the pool
- and set the nodes if any in the list those doesn't exist
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer will listen on
- :param nodes: The list of nodes to be updated to the pool
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- result = {}
- changed = False
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes_to_check=nodes)
- if not nodes_exist:
- changed = True
- result = self.set_loadbalancernodes(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes=nodes)
- else:
- result = "Pool doesn't exist"
- else:
- result = "Load balancer doesn't Exist"
- return changed, result
-
- def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
- """
- Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer will listen on
- :param nodes: the list of nodes to be added
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- changed, result = self.add_lbpool_nodes(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes_to_add=nodes)
- else:
- result = "Pool doesn't exist"
- else:
- result = "Load balancer doesn't Exist"
- return changed, result
-
- def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
- """
- Checks to see if the provided list of nodes exist for the pool and removes them if found any
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer will listen on
- :param nodes: the list of nodes to be removed
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- changed, result = self.remove_lbpool_nodes(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes_to_remove=nodes)
- else:
- result = "Pool doesn't exist"
- else:
- result = "Load balancer doesn't Exist"
- return changed, result
-
- def create_loadbalancer(self, name, alias, location, description, status):
- """
- Create a loadbalancer w/ params
- :param name: Name of loadbalancer
- :param alias: Alias of account
- :param location: Datacenter
- :param description: Description for loadbalancer to be created
- :param status: Enabled / Disabled
- :return: result: The result from the CLC API call
- """
- result = None
- try:
- result = self.clc.v2.API.Call('POST',
- '/v2/sharedLoadBalancers/%s/%s' % (alias,
- location),
- json.dumps({"name": name,
- "description": description,
- "status": status}))
- sleep(1)
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to create load balancer "{0}". {1}'.format(
- name, str(e.response_text)))
- return result
-
- def create_loadbalancerpool(
- self, alias, location, lb_id, method, persistence, port):
- """
- Creates a pool on the provided load balancer
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param method: the load balancing method
- :param persistence: the load balancing persistence type
- :param port: the port that the load balancer will listen on
- :return: result: The result from the create API call
- """
- result = None
- try:
- result = self.clc.v2.API.Call(
- 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
- (alias, location, lb_id), json.dumps(
- {
- "port": port, "method": method, "persistence": persistence
- }))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to create pool for load balancer id "{0}". {1}'.format(
- lb_id, str(e.response_text)))
- return result
-
- def delete_loadbalancer(self, alias, location, name):
- """
- Delete CLC loadbalancer
- :param alias: Alias for account
- :param location: Datacenter
- :param name: Name of the loadbalancer to delete
- :return: result: The result from the CLC API call
- """
- result = None
- lb_id = self._get_loadbalancer_id(name=name)
- try:
- result = self.clc.v2.API.Call(
- 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
- (alias, location, lb_id))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to delete load balancer "{0}". {1}'.format(
- name, str(e.response_text)))
- return result
-
- def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
- """
- Delete the pool on the provided load balancer
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the load balancer pool
- :return: result: The result from the delete API call
- """
- result = None
- try:
- result = self.clc.v2.API.Call(
- 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
- (alias, location, lb_id, pool_id))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
- lb_id, str(e.response_text)))
- return result
-
- def _get_loadbalancer_id(self, name):
- """
- Retrieves unique ID of loadbalancer
- :param name: Name of loadbalancer
- :return: Unique ID of the loadbalancer
- """
- id = None
- for lb in self.lb_dict:
- if lb.get('name') == name:
- id = lb.get('id')
- return id
-
- def _get_loadbalancer_list(self, alias, location):
- """
- Retrieve a list of loadbalancers
- :param alias: Alias for account
- :param location: Datacenter
- :return: JSON data for all loadbalancers at datacenter
- """
- result = None
- try:
- result = self.clc.v2.API.Call(
- 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to fetch load balancers for account: {0}. {1}'.format(
- alias, str(e.response_text)))
- return result
-
- def _loadbalancer_exists(self, name):
- """
- Verify a loadbalancer exists
- :param name: Name of loadbalancer
- :return: False or the ID of the existing loadbalancer
- """
- result = False
-
- for lb in self.lb_dict:
- if lb.get('name') == name:
- result = lb.get('id')
- return result
-
- def _loadbalancerpool_exists(self, alias, location, port, lb_id):
- """
- Checks to see if a pool exists on the specified port on the provided load balancer
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param port: the port to check and see if it exists
- :param lb_id: the id string of the provided load balancer
- :return: result: The id string of the pool or False
- """
- result = False
- try:
- pool_list = self.clc.v2.API.Call(
- 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
- (alias, location, lb_id))
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg='Unable to fetch the load balancer pools for for load balancer id: {0}. {1}'.format(
- lb_id, str(e.response_text)))
- for pool in pool_list:
- if int(pool.get('port')) == int(port):
- result = pool.get('id')
- return result
-
- def _loadbalancerpool_nodes_exists(
- self, alias, location, lb_id, pool_id, nodes_to_check):
- """
- Checks to see if a set of nodes exists on the specified port on the provided load balancer
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the provided load balancer
- :param pool_id: the id string of the load balancer pool
- :param nodes_to_check: the list of nodes to check for
- :return: result: True / False indicating if the given nodes exist
- """
- result = False
- nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
- for node in nodes_to_check:
- if not node.get('status'):
- node['status'] = 'enabled'
- if node in nodes:
- result = True
- else:
- result = False
- return result
-
- def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
- """
- Updates nodes to the provided pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :param nodes: a list of dictionaries containing the nodes to set
- :return: result: The result from the CLC API call
- """
- result = None
- if not lb_id:
- return result
- if not self.module.check_mode:
- try:
- result = self.clc.v2.API.Call('PUT',
- '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
- % (alias, location, lb_id, pool_id), json.dumps(nodes))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
- pool_id, str(e.response_text)))
- return result
-
- def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
- """
- Add nodes to the provided pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :param nodes_to_add: a list of dictionaries containing the nodes to add
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- result = {}
- nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
- for node in nodes_to_add:
- if not node.get('status'):
- node['status'] = 'enabled'
- if node not in nodes:
- changed = True
- nodes.append(node)
- if changed is True and not self.module.check_mode:
- result = self.set_loadbalancernodes(
- alias,
- location,
- lb_id,
- pool_id,
- nodes)
- return changed, result
-
- def remove_lbpool_nodes(
- self, alias, location, lb_id, pool_id, nodes_to_remove):
- """
- Removes nodes from the provided pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :param nodes_to_remove: a list of dictionaries containing the nodes to remove
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- result = {}
- nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
- for node in nodes_to_remove:
- if not node.get('status'):
- node['status'] = 'enabled'
- if node in nodes:
- changed = True
- nodes.remove(node)
- if changed is True and not self.module.check_mode:
- result = self.set_loadbalancernodes(
- alias,
- location,
- lb_id,
- pool_id,
- nodes)
- return changed, result
-
- def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
- """
- Return the list of nodes available to the provided load balancer pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :return: result: The list of nodes
- """
- result = None
- try:
- result = self.clc.v2.API.Call('GET',
- '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
- % (alias, location, lb_id, pool_id))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
- pool_id, str(e.response_text)))
- return result
-
- @staticmethod
- def define_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(required=True),
- description=dict(),
- location=dict(required=True),
- alias=dict(required=True),
- port=dict(choices=[80, 443]),
- method=dict(choices=['leastConnection', 'roundRobin']),
- persistence=dict(choices=['standard', 'sticky']),
- nodes=dict(type='list', default=[], elements='dict'),
- status=dict(default='enabled', choices=['enabled', 'disabled']),
- state=dict(
- default='present',
- choices=[
- 'present',
- 'absent',
- 'port_absent',
- 'nodes_present',
- 'nodes_absent'])
- )
- return argument_spec
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
- supports_check_mode=True)
- clc_loadbalancer = ClcLoadBalancer(module)
- clc_loadbalancer.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_modify_server.py b/plugins/modules/clc_modify_server.py
deleted file mode 100644
index f40379e748..0000000000
--- a/plugins/modules/clc_modify_server.py
+++ /dev/null
@@ -1,961 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_modify_server
-short_description: Modify servers in CenturyLink Cloud
-description:
- - An Ansible module to modify servers in CenturyLink Cloud.
-deprecated:
- removed_in: 11.0.0
- why: >
- Lumen Public Cloud (formerly known as CenturyLink Cloud) has gone End-of-Life in September 2023.
- See more at U(https://www.ctl.io/knowledge-base/release-notes/2023/lumen-public-cloud-platform-end-of-life-notice/?).
- alternative: There is none.
-extends_documentation_fragment:
- - community.general.attributes
- - community.general.clc
-author:
- - "CLC Runner (@clc-runner)"
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- server_ids:
- description:
- - A list of server IDs to modify.
- type: list
- required: true
- elements: str
- cpu:
- description:
- - How many CPUs to update on the server.
- type: str
- memory:
- description:
- - Memory (in GB) to set to the server.
- type: str
- anti_affinity_policy_id:
- description:
- - The anti affinity policy ID to be set for a hyper scale server. This is mutually exclusive with O(anti_affinity_policy_name).
- type: str
- anti_affinity_policy_name:
- description:
- - The anti affinity policy name to be set for a hyper scale server. This is mutually exclusive with O(anti_affinity_policy_id).
- type: str
- alert_policy_id:
- description:
- - The alert policy ID to be associated to the server. This is mutually exclusive with O(alert_policy_name).
- type: str
- alert_policy_name:
- description:
- - The alert policy name to be associated to the server. This is mutually exclusive with O(alert_policy_id).
- type: str
- state:
- description:
- - The state to insure that the provided resources are in.
- type: str
- default: 'present'
- choices: ['present', 'absent']
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- type: bool
- default: true
-"""
-
-EXAMPLES = r"""
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
-- name: Set the cpu count to 4 on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- cpu: 4
- state: present
-
-- name: Set the memory to 8GB on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- memory: 8
- state: present
-
-- name: Set the anti affinity policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- anti_affinity_policy_name: 'aa_policy'
- state: present
-
-- name: Remove the anti affinity policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- anti_affinity_policy_name: 'aa_policy'
- state: absent
-
-- name: Add the alert policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- alert_policy_name: 'alert_policy'
- state: present
-
-- name: Remove the alert policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- alert_policy_name: 'alert_policy'
- state: absent
-
-- name: Ret the memory to 16GB and cpu to 8 core on a lust if servers
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- cpu: 8
- memory: 16
- state: present
-"""
-
-RETURN = r"""
-server_ids:
- description: The list of server IDs that are changed.
- returned: success
- type: list
- sample: ["UC1TEST-SVR01", "UC1TEST-SVR02"]
-servers:
- description: The list of server objects that are changed.
- returned: success
- type: list
- sample:
- [
- {
- "changeInfo":{
- "createdBy":"service.wfad",
- "createdDate":1438196820,
- "modifiedBy":"service.wfad",
- "modifiedDate":1438196820
- },
- "description":"test-server",
- "details":{
- "alertPolicies":[
-
- ],
- "cpu":1,
- "customFields":[
-
- ],
- "diskCount":3,
- "disks":[
- {
- "id":"0:0",
- "partitionPaths":[
-
- ],
- "sizeGB":1
- },
- {
- "id":"0:1",
- "partitionPaths":[
-
- ],
- "sizeGB":2
- },
- {
- "id":"0:2",
- "partitionPaths":[
-
- ],
- "sizeGB":14
- }
- ],
- "hostName":"",
- "inMaintenanceMode":false,
- "ipAddresses":[
- {
- "internal":"10.1.1.1"
- }
- ],
- "memoryGB":1,
- "memoryMB":1024,
- "partitions":[
-
- ],
- "powerState":"started",
- "snapshots":[
-
- ],
- "storageGB":17
- },
- "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"test-server",
- "ipaddress":"10.120.45.23",
- "isTemplate":false,
- "links":[
- {
- "href":"/v2/servers/wfad/test-server",
- "id":"test-server",
- "rel":"self",
- "verbs":[
- "GET",
- "PATCH",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"086ac1dfe0b6411989e8d1b77c4065f0",
- "rel":"group"
- },
- {
- "href":"/v2/accounts/wfad",
- "id":"wfad",
- "rel":"account"
- },
- {
- "href":"/v2/billing/wfad/serverPricing/test-server",
- "rel":"billing"
- },
- {
- "href":"/v2/servers/wfad/test-server/publicIPAddresses",
- "rel":"publicIPAddresses",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/credentials",
- "rel":"credentials"
- },
- {
- "href":"/v2/servers/wfad/test-server/statistics",
- "rel":"statistics"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
- "rel":"upcomingScheduledActivities"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
- "rel":"scheduledActivities",
- "verbs":[
- "GET",
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/capabilities",
- "rel":"capabilities"
- },
- {
- "href":"/v2/servers/wfad/test-server/alertPolicies",
- "rel":"alertPolicyMappings",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
- "rel":"antiAffinityPolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
- "rel":"cpuAutoscalePolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- }
- ],
- "locationId":"UC1",
- "name":"test-server",
- "os":"ubuntu14_64Bit",
- "osType":"Ubuntu 14 64-bit",
- "status":"active",
- "storageType":"standard",
- "type":"standard"
- }
- ]
-"""
-
-__version__ = '${version}'
-
-import json
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcModifyServer:
- clc = clc_sdk
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- self._set_clc_credentials_from_env()
-
- p = self.module.params
- cpu = p.get('cpu')
- memory = p.get('memory')
- state = p.get('state')
- if state == 'absent' and (cpu or memory):
- return self.module.fail_json(
- msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
-
- server_ids = p['server_ids']
- if not isinstance(server_ids, list):
- return self.module.fail_json(
- msg='server_ids needs to be a list of instances to modify: %s' %
- server_ids)
-
- (changed, server_dict_array, changed_server_ids) = self._modify_servers(
- server_ids=server_ids)
-
- self.module.exit_json(
- changed=changed,
- server_ids=changed_server_ids,
- servers=server_dict_array)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- server_ids=dict(type='list', required=True, elements='str'),
- state=dict(default='present', choices=['present', 'absent']),
- cpu=dict(),
- memory=dict(),
- anti_affinity_policy_id=dict(),
- anti_affinity_policy_name=dict(),
- alert_policy_id=dict(),
- alert_policy_name=dict(),
- wait=dict(type='bool', default=True)
- )
- mutually_exclusive = [
- ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
- ['alert_policy_id', 'alert_policy_name']
- ]
- return {"argument_spec": argument_spec,
- "mutually_exclusive": mutually_exclusive}
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _get_servers_from_clc(self, server_list, message):
- """
- Internal function to fetch list of CLC server objects from a list of server ids
- :param server_list: The list of server ids
- :param message: the error message to throw in case of any error
- :return the list of CLC server objects
- """
- try:
- return self.clc.v2.Servers(server_list).servers
- except CLCException as ex:
- return self.module.fail_json(msg=message + ': %s' % ex.message)
-
- def _modify_servers(self, server_ids):
- """
- modify the servers configuration on the provided list
- :param server_ids: list of servers to modify
- :return: a list of dictionaries with server information about the servers that were modified
- """
- p = self.module.params
- state = p.get('state')
- server_params = {
- 'cpu': p.get('cpu'),
- 'memory': p.get('memory'),
- 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
- 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
- 'alert_policy_id': p.get('alert_policy_id'),
- 'alert_policy_name': p.get('alert_policy_name'),
- }
- changed = False
- server_changed = False
- aa_changed = False
- ap_changed = False
- server_dict_array = []
- result_server_ids = []
- request_list = []
- changed_servers = []
-
- if not isinstance(server_ids, list) or len(server_ids) < 1:
- return self.module.fail_json(
- msg='server_ids should be a list of servers, aborting')
-
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- for server in servers:
- if state == 'present':
- server_changed, server_result = self._ensure_server_config(
- server, server_params)
- if server_result:
- request_list.append(server_result)
- aa_changed = self._ensure_aa_policy_present(
- server,
- server_params)
- ap_changed = self._ensure_alert_policy_present(
- server,
- server_params)
- elif state == 'absent':
- aa_changed = self._ensure_aa_policy_absent(
- server,
- server_params)
- ap_changed = self._ensure_alert_policy_absent(
- server,
- server_params)
- if server_changed or aa_changed or ap_changed:
- changed_servers.append(server)
- changed = True
-
- self._wait_for_requests(self.module, request_list)
- self._refresh_servers(self.module, changed_servers)
-
- for server in changed_servers:
- server_dict_array.append(server.data)
- result_server_ids.append(server.id)
-
- return changed, server_dict_array, result_server_ids
-
- def _ensure_server_config(
- self, server, server_params):
- """
- ensures the server is updated with the provided cpu and memory
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
- :return: (changed, group) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- cpu = server_params.get('cpu')
- memory = server_params.get('memory')
- changed = False
- result = None
-
- if not cpu:
- cpu = server.cpu
- if not memory:
- memory = server.memory
- if memory != server.memory or cpu != server.cpu:
- if not self.module.check_mode:
- result = self._modify_clc_server(
- self.clc,
- self.module,
- server.id,
- cpu,
- memory)
- changed = True
- return changed, result
-
- @staticmethod
- def _modify_clc_server(clc, module, server_id, cpu, memory):
- """
- Modify the memory or CPU of a clc server.
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param server_id: id of the server to modify
- :param cpu: the new cpu value
- :param memory: the new memory value
- :return: the result of CLC API call
- """
- result = None
- acct_alias = clc.v2.Account.GetAlias()
- try:
- # Update the server configuration
- job_obj = clc.v2.API.Call('PATCH',
- 'servers/%s/%s' % (acct_alias,
- server_id),
- json.dumps([{"op": "set",
- "member": "memory",
- "value": memory},
- {"op": "set",
- "member": "cpu",
- "value": cpu}]))
- result = clc.v2.Requests(job_obj)
- except APIFailedResponse as ex:
- module.fail_json(
- msg='Unable to update the server configuration for server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _wait_for_requests(module, request_list):
- """
- Block until server provisioning requests are completed.
- :param module: the AnsibleModule object
- :param request_list: a list of clc-sdk.Request instances
- :return: none
- """
- wait = module.params.get('wait')
- if wait:
- # Requests.WaitUntilComplete() returns the count of failed requests
- failed_requests_count = sum(
- [request.WaitUntilComplete() for request in request_list])
-
- if failed_requests_count > 0:
- module.fail_json(
- msg='Unable to process modify server request')
-
- @staticmethod
- def _refresh_servers(module, servers):
- """
- Loop through a list of servers and refresh them.
- :param module: the AnsibleModule object
- :param servers: list of clc-sdk.Server instances to refresh
- :return: none
- """
- for server in servers:
- try:
- server.Refresh()
- except CLCException as ex:
- module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
- server.id, ex.message
- ))
-
- def _ensure_aa_policy_present(
- self, server, server_params):
- """
- ensures the server is updated with the provided anti affinity policy
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
- :return: (changed, group) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- acct_alias = self.clc.v2.Account.GetAlias()
-
- aa_policy_id = server_params.get('anti_affinity_policy_id')
- aa_policy_name = server_params.get('anti_affinity_policy_name')
- if not aa_policy_id and aa_policy_name:
- aa_policy_id = self._get_aa_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- aa_policy_name)
- current_aa_policy_id = self._get_aa_policy_id_of_server(
- self.clc,
- self.module,
- acct_alias,
- server.id)
-
- if aa_policy_id and aa_policy_id != current_aa_policy_id:
- self._modify_aa_policy(
- self.clc,
- self.module,
- acct_alias,
- server.id,
- aa_policy_id)
- changed = True
- return changed
-
- def _ensure_aa_policy_absent(
- self, server, server_params):
- """
- ensures the provided anti affinity policy is removed from the server
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
- :return: (changed, group) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- acct_alias = self.clc.v2.Account.GetAlias()
- aa_policy_id = server_params.get('anti_affinity_policy_id')
- aa_policy_name = server_params.get('anti_affinity_policy_name')
- if not aa_policy_id and aa_policy_name:
- aa_policy_id = self._get_aa_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- aa_policy_name)
- current_aa_policy_id = self._get_aa_policy_id_of_server(
- self.clc,
- self.module,
- acct_alias,
- server.id)
-
- if aa_policy_id and aa_policy_id == current_aa_policy_id:
- self._delete_aa_policy(
- self.clc,
- self.module,
- acct_alias,
- server.id)
- changed = True
- return changed
-
- @staticmethod
- def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
- """
- modifies the anti affinity policy of the CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :param aa_policy_id: the anti affinity policy id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('PUT',
- 'servers/%s/%s/antiAffinityPolicy' % (
- acct_alias,
- server_id),
- json.dumps({"id": aa_policy_id}))
- except APIFailedResponse as ex:
- module.fail_json(
- msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _delete_aa_policy(clc, module, acct_alias, server_id):
- """
- Delete the anti affinity policy of the CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('DELETE',
- 'servers/%s/%s/antiAffinityPolicy' % (
- acct_alias,
- server_id),
- json.dumps({}))
- except APIFailedResponse as ex:
- module.fail_json(
- msg='Unable to delete anti affinity policy to server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
- """
- retrieves the anti affinity policy id of the server based on the name of the policy
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param aa_policy_name: the anti affinity policy name
- :return: aa_policy_id: The anti affinity policy id
- """
- aa_policy_id = None
- try:
- aa_policies = clc.v2.API.Call(method='GET',
- url='antiAffinityPolicies/%s' % alias)
- except APIFailedResponse as ex:
- return module.fail_json(
- msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
- alias, str(ex.response_text)))
- for aa_policy in aa_policies.get('items'):
- if aa_policy.get('name') == aa_policy_name:
- if not aa_policy_id:
- aa_policy_id = aa_policy.get('id')
- else:
- return module.fail_json(
- msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
- if not aa_policy_id:
- module.fail_json(
- msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
- return aa_policy_id
-
- @staticmethod
- def _get_aa_policy_id_of_server(clc, module, alias, server_id):
- """
- retrieves the anti affinity policy id of the server based on the CLC server id
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param server_id: the CLC server id
- :return: aa_policy_id: The anti affinity policy id
- """
- aa_policy_id = None
- try:
- result = clc.v2.API.Call(
- method='GET', url='servers/%s/%s/antiAffinityPolicy' %
- (alias, server_id))
- aa_policy_id = result.get('id')
- except APIFailedResponse as ex:
- if ex.response_status_code != 404:
- module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return aa_policy_id
-
- def _ensure_alert_policy_present(
- self, server, server_params):
- """
- ensures the server is updated with the provided alert policy
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
- :return: (changed, group) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- acct_alias = self.clc.v2.Account.GetAlias()
- alert_policy_id = server_params.get('alert_policy_id')
- alert_policy_name = server_params.get('alert_policy_name')
- if not alert_policy_id and alert_policy_name:
- alert_policy_id = self._get_alert_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- alert_policy_name)
- if alert_policy_id and not self._alert_policy_exists(
- server, alert_policy_id):
- self._add_alert_policy_to_server(
- self.clc,
- self.module,
- acct_alias,
- server.id,
- alert_policy_id)
- changed = True
- return changed
-
- def _ensure_alert_policy_absent(
- self, server, server_params):
- """
- ensures the alert policy is removed from the server
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
- :return: (changed, group) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
-
- acct_alias = self.clc.v2.Account.GetAlias()
- alert_policy_id = server_params.get('alert_policy_id')
- alert_policy_name = server_params.get('alert_policy_name')
- if not alert_policy_id and alert_policy_name:
- alert_policy_id = self._get_alert_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- alert_policy_name)
-
- if alert_policy_id and self._alert_policy_exists(
- server, alert_policy_id):
- self._remove_alert_policy_to_server(
- self.clc,
- self.module,
- acct_alias,
- server.id,
- alert_policy_id)
- changed = True
- return changed
-
- @staticmethod
- def _add_alert_policy_to_server(
- clc, module, acct_alias, server_id, alert_policy_id):
- """
- add the alert policy to CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :param alert_policy_id: the alert policy id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('POST',
- 'servers/%s/%s/alertPolicies' % (
- acct_alias,
- server_id),
- json.dumps({"id": alert_policy_id}))
- except APIFailedResponse as ex:
- module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _remove_alert_policy_to_server(
- clc, module, acct_alias, server_id, alert_policy_id):
- """
- remove the alert policy to the CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :param alert_policy_id: the alert policy id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('DELETE',
- 'servers/%s/%s/alertPolicies/%s'
- % (acct_alias, server_id, alert_policy_id))
- except APIFailedResponse as ex:
- module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
- """
- retrieves the alert policy id of the server based on the name of the policy
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param alert_policy_name: the alert policy name
- :return: alert_policy_id: The alert policy id
- """
- alert_policy_id = None
- try:
- alert_policies = clc.v2.API.Call(method='GET',
- url='alertPolicies/%s' % alias)
- except APIFailedResponse as ex:
- return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
- alias, str(ex.response_text)))
- for alert_policy in alert_policies.get('items'):
- if alert_policy.get('name') == alert_policy_name:
- if not alert_policy_id:
- alert_policy_id = alert_policy.get('id')
- else:
- return module.fail_json(
- msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
- return alert_policy_id
-
- @staticmethod
- def _alert_policy_exists(server, alert_policy_id):
- """
- Checks if the alert policy exists for the server
- :param server: the clc server object
- :param alert_policy_id: the alert policy
- :return: True: if the given alert policy id associated to the server, False otherwise
- """
- result = False
- alert_policies = server.alertPolicies
- if alert_policies:
- for alert_policy in alert_policies:
- if alert_policy.get('id') == alert_policy_id:
- result = True
- return result
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
-
- argument_dict = ClcModifyServer._define_module_argument_spec()
- module = AnsibleModule(supports_check_mode=True, **argument_dict)
- clc_modify_server = ClcModifyServer(module)
- clc_modify_server.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_publicip.py b/plugins/modules/clc_publicip.py
deleted file mode 100644
index 9053638447..0000000000
--- a/plugins/modules/clc_publicip.py
+++ /dev/null
@@ -1,359 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_publicip
-short_description: Add and Delete public IPs on servers in CenturyLink Cloud
-description:
- - An Ansible module to add or delete public IP addresses on an existing server or servers in CenturyLink Cloud.
-deprecated:
- removed_in: 11.0.0
- why: >
- Lumen Public Cloud (formerly known as CenturyLink Cloud) has gone End-of-Life in September 2023.
- See more at U(https://www.ctl.io/knowledge-base/release-notes/2023/lumen-public-cloud-platform-end-of-life-notice/?).
- alternative: There is none.
-extends_documentation_fragment:
- - community.general.attributes
- - community.general.clc
-author:
- - "CLC Runner (@clc-runner)"
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- protocol:
- description:
- - The protocol that the public IP will listen for.
- type: str
- default: TCP
- choices: ['TCP', 'UDP', 'ICMP']
- ports:
- description:
- - A list of ports to expose. This is required when O(state=present).
- type: list
- elements: int
- server_ids:
- description:
- - A list of servers to create public IPs on.
- type: list
- required: true
- elements: str
- state:
- description:
- - Determine whether to create or delete public IPs. If V(present) module will not create a second public IP if one already
- exists.
- type: str
- default: present
- choices: ['present', 'absent']
- wait:
- description:
- - Whether to wait for the tasks to finish before returning.
- type: bool
- default: true
-"""
-
-EXAMPLES = r"""
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
-- name: Add Public IP to Server
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create Public IP For Servers
- community.general.clc_publicip:
- protocol: TCP
- ports:
- - 80
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- state: present
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-
-- name: Delete Public IP from Server
- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - name: Create Public IP For Servers
- community.general.clc_publicip:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- state: absent
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-"""
-
-RETURN = r"""
-server_ids:
- description: The list of server IDs that are changed.
- returned: success
- type: list
- sample: ["UC1TEST-SVR01", "UC1TEST-SVR02"]
-"""
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcPublicIp(object):
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- self._set_clc_credentials_from_env()
- params = self.module.params
- server_ids = params['server_ids']
- ports = params['ports']
- protocol = params['protocol']
- state = params['state']
-
- if state == 'present':
- changed, changed_server_ids, requests = self.ensure_public_ip_present(
- server_ids=server_ids, protocol=protocol, ports=ports)
- elif state == 'absent':
- changed, changed_server_ids, requests = self.ensure_public_ip_absent(
- server_ids=server_ids)
- else:
- return self.module.fail_json(msg="Unknown State: " + state)
- self._wait_for_requests_to_complete(requests)
- return self.module.exit_json(changed=changed,
- server_ids=changed_server_ids)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- server_ids=dict(type='list', required=True, elements='str'),
- protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
- ports=dict(type='list', elements='int'),
- wait=dict(type='bool', default=True),
- state=dict(default='present', choices=['present', 'absent']),
- )
- return argument_spec
-
- def ensure_public_ip_present(self, server_ids, protocol, ports):
- """
- Ensures the given server ids having the public ip available
- :param server_ids: the list of server ids
- :param protocol: the ip protocol
- :param ports: the list of ports to expose
- :return: (changed, changed_server_ids, results)
- changed: A flag indicating if there is any change
- changed_server_ids : the list of server ids that are changed
- results: The result list from clc public ip call
- """
- changed = False
- results = []
- changed_server_ids = []
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.PublicIPs().public_ips) == 0]
- ports_to_expose = [{'protocol': protocol, 'port': port}
- for port in ports]
- for server in servers_to_change:
- if not self.module.check_mode:
- result = self._add_publicip_to_server(server, ports_to_expose)
- results.append(result)
- changed_server_ids.append(server.id)
- changed = True
- return changed, changed_server_ids, results
-
- def _add_publicip_to_server(self, server, ports_to_expose):
- result = None
- try:
- result = server.PublicIPs().Add(ports_to_expose)
- except CLCException as ex:
- self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def ensure_public_ip_absent(self, server_ids):
- """
- Ensures the given server ids having the public ip removed if there is any
- :param server_ids: the list of server ids
- :return: (changed, changed_server_ids, results)
- changed: A flag indicating if there is any change
- changed_server_ids : the list of server ids that are changed
- results: The result list from clc public ip call
- """
- changed = False
- results = []
- changed_server_ids = []
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.PublicIPs().public_ips) > 0]
- for server in servers_to_change:
- if not self.module.check_mode:
- result = self._remove_publicip_from_server(server)
- results.append(result)
- changed_server_ids.append(server.id)
- changed = True
- return changed, changed_server_ids, results
-
- def _remove_publicip_from_server(self, server):
- result = None
- try:
- for ip_address in server.PublicIPs().public_ips:
- result = ip_address.Delete()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def _wait_for_requests_to_complete(self, requests_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param requests_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in requests_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process public ip request')
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _get_servers_from_clc(self, server_ids, message):
- """
- Gets list of servers form CLC api
- """
- try:
- return self.clc.v2.Servers(server_ids).servers
- except CLCException as exception:
- self.module.fail_json(msg=message + ': %s' % exception)
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcPublicIp._define_module_argument_spec(),
- supports_check_mode=True
- )
- clc_public_ip = ClcPublicIp(module)
- clc_public_ip.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_server.py b/plugins/modules/clc_server.py
deleted file mode 100644
index a843032cec..0000000000
--- a/plugins/modules/clc_server.py
+++ /dev/null
@@ -1,1554 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_server
-short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud
-description:
- - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
-deprecated:
- removed_in: 11.0.0
- why: >
- Lumen Public Cloud (formerly known as CenturyLink Cloud) has gone End-of-Life in September 2023.
- See more at U(https://www.ctl.io/knowledge-base/release-notes/2023/lumen-public-cloud-platform-end-of-life-notice/?).
- alternative: There is none.
-extends_documentation_fragment:
- - community.general.attributes
- - community.general.clc
-author:
- - "CLC Runner (@clc-runner)"
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- additional_disks:
- description:
- - The list of additional disks for the server.
- type: list
- elements: dict
- default: []
- add_public_ip:
- description:
- - Whether to add a public IP to the server.
- type: bool
- default: false
- alias:
- description:
- - The account alias to provision the servers under.
- type: str
- anti_affinity_policy_id:
- description:
- - The anti-affinity policy to assign to the server. This is mutually exclusive with O(anti_affinity_policy_name).
- type: str
- anti_affinity_policy_name:
- description:
- - The anti-affinity policy to assign to the server. This is mutually exclusive with O(anti_affinity_policy_id).
- type: str
- alert_policy_id:
- description:
- - The alert policy to assign to the server. This is mutually exclusive with O(alert_policy_name).
- type: str
- alert_policy_name:
- description:
- - The alert policy to assign to the server. This is mutually exclusive with O(alert_policy_id).
- type: str
- count:
- description:
- - The number of servers to build (mutually exclusive with O(exact_count)).
- default: 1
- type: int
- count_group:
- description:
- - Required when exact_count is specified. The Server Group use to determine how many servers to deploy.
- type: str
- cpu:
- description:
- - How many CPUs to provision on the server.
- default: 1
- type: int
- cpu_autoscale_policy_id:
- description:
- - The autoscale policy to assign to the server.
- type: str
- custom_fields:
- description:
- - The list of custom fields to set on the server.
- type: list
- default: []
- elements: dict
- description:
- description:
- - The description to set for the server.
- type: str
- exact_count:
- description:
- - Run in idempotent mode. Will insure that this exact number of servers are running in the provided group, creating
- and deleting them to reach that count. Requires O(count_group) to be set.
- type: int
- group:
- description:
- - The Server Group to create servers under.
- type: str
- default: 'Default Group'
- ip_address:
- description:
- - The IP Address for the server. One is assigned if not provided.
- type: str
- location:
- description:
- - The Datacenter to create servers in.
- type: str
- managed_os:
- description:
- - Whether to create the server as 'Managed' or not.
- type: bool
- default: false
- required: false
- memory:
- description:
- - Memory in GB.
- type: int
- default: 1
- name:
- description:
- - A 1 to 6 character identifier to use for the server. This is required when O(state=present).
- type: str
- network_id:
- description:
- - The network UUID on which to create servers.
- type: str
- packages:
- description:
- - The list of blue print packages to run on the server after its created.
- type: list
- elements: dict
- default: []
- password:
- description:
- - Password for the administrator / root user.
- type: str
- primary_dns:
- description:
- - Primary DNS used by the server.
- type: str
- public_ip_protocol:
- description:
- - The protocol to use for the public IP if O(add_public_ip=true).
- type: str
- default: 'TCP'
- choices: ['TCP', 'UDP', 'ICMP']
- public_ip_ports:
- description:
- - A list of ports to allow on the firewall to the servers public IP, if O(add_public_ip=true).
- type: list
- elements: dict
- default: []
- secondary_dns:
- description:
- - Secondary DNS used by the server.
- type: str
- server_ids:
- description:
- - Required for started, stopped, and absent states. A list of server IDs to ensure are started, stopped, or absent.
- type: list
- default: []
- elements: str
- source_server_password:
- description:
- - The password for the source server if a clone is specified.
- type: str
- state:
- description:
- - The state to insure that the provided resources are in.
- type: str
- default: 'present'
- choices: ['present', 'absent', 'started', 'stopped']
- storage_type:
- description:
- - The type of storage to attach to the server.
- type: str
- default: 'standard'
- choices: ['standard', 'hyperscale']
- template:
- description:
- - The template to use for server creation. Will search for a template if a partial string is provided. This is required
- when O(state=present).
- type: str
- ttl:
- description:
- - The time to live for the server in seconds. The server will be deleted when this time expires.
- type: str
- type:
- description:
- - The type of server to create.
- type: str
- default: 'standard'
- choices: ['standard', 'hyperscale', 'bareMetal']
- configuration_id:
- description:
- - Only required for bare metal servers. Specifies the identifier for the specific configuration type of bare metal server
- to deploy.
- type: str
- os_type:
- description:
- - Only required for bare metal servers. Specifies the OS to provision with the bare metal server.
- type: str
- choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- type: bool
- default: true
-"""
-
-EXAMPLES = r"""
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
-- name: Provision a single Ubuntu Server
- community.general.clc_server:
- name: test
- template: ubuntu-14-64
- count: 1
- group: Default Group
- state: present
-
-- name: Ensure 'Default Group' has exactly 5 servers
- community.general.clc_server:
- name: test
- template: ubuntu-14-64
- exact_count: 5
- count_group: Default Group
- group: Default Group
-
-- name: Stop a Server
- community.general.clc_server:
- server_ids:
- - UC1ACCT-TEST01
- state: stopped
-
-- name: Start a Server
- community.general.clc_server:
- server_ids:
- - UC1ACCT-TEST01
- state: started
-
-- name: Delete a Server
- community.general.clc_server:
- server_ids:
- - UC1ACCT-TEST01
- state: absent
-"""
-
-RETURN = r"""
-server_ids:
- description: The list of server IDs that are created.
- returned: success
- type: list
- sample: ["UC1TEST-SVR01", "UC1TEST-SVR02"]
-partially_created_server_ids:
- description: The list of server IDs that are partially created.
- returned: success
- type: list
- sample: ["UC1TEST-SVR01", "UC1TEST-SVR02"]
-servers:
- description: The list of server objects returned from CLC.
- returned: success
- type: list
- sample:
- [
- {
- "changeInfo":{
- "createdBy":"service.wfad",
- "createdDate":1438196820,
- "modifiedBy":"service.wfad",
- "modifiedDate":1438196820
- },
- "description":"test-server",
- "details":{
- "alertPolicies":[
-
- ],
- "cpu":1,
- "customFields":[
-
- ],
- "diskCount":3,
- "disks":[
- {
- "id":"0:0",
- "partitionPaths":[
-
- ],
- "sizeGB":1
- },
- {
- "id":"0:1",
- "partitionPaths":[
-
- ],
- "sizeGB":2
- },
- {
- "id":"0:2",
- "partitionPaths":[
-
- ],
- "sizeGB":14
- }
- ],
- "hostName":"",
- "inMaintenanceMode":false,
- "ipAddresses":[
- {
- "internal":"10.1.1.1"
- }
- ],
- "memoryGB":1,
- "memoryMB":1024,
- "partitions":[
-
- ],
- "powerState":"started",
- "snapshots":[
-
- ],
- "storageGB":17
- },
- "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"test-server",
- "ipaddress":"10.120.45.23",
- "isTemplate":false,
- "links":[
- {
- "href":"/v2/servers/wfad/test-server",
- "id":"test-server",
- "rel":"self",
- "verbs":[
- "GET",
- "PATCH",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"086ac1dfe0b6411989e8d1b77c4065f0",
- "rel":"group"
- },
- {
- "href":"/v2/accounts/wfad",
- "id":"wfad",
- "rel":"account"
- },
- {
- "href":"/v2/billing/wfad/serverPricing/test-server",
- "rel":"billing"
- },
- {
- "href":"/v2/servers/wfad/test-server/publicIPAddresses",
- "rel":"publicIPAddresses",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/credentials",
- "rel":"credentials"
- },
- {
- "href":"/v2/servers/wfad/test-server/statistics",
- "rel":"statistics"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
- "rel":"upcomingScheduledActivities"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
- "rel":"scheduledActivities",
- "verbs":[
- "GET",
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/capabilities",
- "rel":"capabilities"
- },
- {
- "href":"/v2/servers/wfad/test-server/alertPolicies",
- "rel":"alertPolicyMappings",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
- "rel":"antiAffinityPolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
- "rel":"cpuAutoscalePolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- }
- ],
- "locationId":"UC1",
- "name":"test-server",
- "os":"ubuntu14_64Bit",
- "osType":"Ubuntu 14 64-bit",
- "status":"active",
- "storageType":"standard",
- "type":"standard"
- }
- ]
-"""
-
-__version__ = '${version}'
-
-import json
-import os
-import time
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcServer:
- clc = clc_sdk
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.group_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- changed = False
- new_server_ids = []
- server_dict_array = []
-
- self._set_clc_credentials_from_env()
- self.module.params = self._validate_module_params(
- self.clc,
- self.module)
- p = self.module.params
- state = p.get('state')
-
- #
- # Handle each state
- #
- partial_servers_ids = []
- if state == 'absent':
- server_ids = p['server_ids']
- if not isinstance(server_ids, list):
- return self.module.fail_json(
- msg='server_ids needs to be a list of instances to delete: %s' %
- server_ids)
-
- (changed,
- server_dict_array,
- new_server_ids) = self._delete_servers(module=self.module,
- clc=self.clc,
- server_ids=server_ids)
-
- elif state in ('started', 'stopped'):
- server_ids = p.get('server_ids')
- if not isinstance(server_ids, list):
- return self.module.fail_json(
- msg='server_ids needs to be a list of servers to run: %s' %
- server_ids)
-
- (changed,
- server_dict_array,
- new_server_ids) = self._start_stop_servers(self.module,
- self.clc,
- server_ids)
-
- elif state == 'present':
- # Changed is always set to true when provisioning new instances
- if not p.get('template') and p.get('type') != 'bareMetal':
- return self.module.fail_json(
- msg='template parameter is required for new instance')
-
- if p.get('exact_count') is None:
- (server_dict_array,
- new_server_ids,
- partial_servers_ids,
- changed) = self._create_servers(self.module,
- self.clc)
- else:
- (server_dict_array,
- new_server_ids,
- partial_servers_ids,
- changed) = self._enforce_count(self.module,
- self.clc)
-
- self.module.exit_json(
- changed=changed,
- server_ids=new_server_ids,
- partially_created_server_ids=partial_servers_ids,
- servers=server_dict_array)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(),
- template=dict(),
- group=dict(default='Default Group'),
- network_id=dict(),
- location=dict(),
- cpu=dict(default=1, type='int'),
- memory=dict(default=1, type='int'),
- alias=dict(),
- password=dict(no_log=True),
- ip_address=dict(),
- storage_type=dict(
- default='standard',
- choices=[
- 'standard',
- 'hyperscale']),
- type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
- primary_dns=dict(),
- secondary_dns=dict(),
- additional_disks=dict(type='list', default=[], elements='dict'),
- custom_fields=dict(type='list', default=[], elements='dict'),
- ttl=dict(),
- managed_os=dict(type='bool', default=False),
- description=dict(),
- source_server_password=dict(no_log=True),
- cpu_autoscale_policy_id=dict(),
- anti_affinity_policy_id=dict(),
- anti_affinity_policy_name=dict(),
- alert_policy_id=dict(),
- alert_policy_name=dict(),
- packages=dict(type='list', default=[], elements='dict'),
- state=dict(
- default='present',
- choices=[
- 'present',
- 'absent',
- 'started',
- 'stopped']),
- count=dict(type='int', default=1),
- exact_count=dict(type='int', ),
- count_group=dict(),
- server_ids=dict(type='list', default=[], elements='str'),
- add_public_ip=dict(type='bool', default=False),
- public_ip_protocol=dict(
- default='TCP',
- choices=[
- 'TCP',
- 'UDP',
- 'ICMP']),
- public_ip_ports=dict(type='list', default=[], elements='dict'),
- configuration_id=dict(),
- os_type=dict(choices=[
- 'redHat6_64Bit',
- 'centOS6_64Bit',
- 'windows2012R2Standard_64Bit',
- 'ubuntu14_64Bit'
- ]),
- wait=dict(type='bool', default=True))
-
- mutually_exclusive = [
- ['exact_count', 'count'],
- ['exact_count', 'state'],
- ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
- ['alert_policy_id', 'alert_policy_name'],
- ]
- return {"argument_spec": argument_spec,
- "mutually_exclusive": mutually_exclusive}
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _validate_module_params(clc, module):
- """
- Validate the module params, and lookup default values.
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: dictionary of validated params
- """
- params = module.params
- datacenter = ClcServer._find_datacenter(clc, module)
-
- ClcServer._validate_types(module)
- ClcServer._validate_name(module)
-
- params['alias'] = ClcServer._find_alias(clc, module)
- params['cpu'] = ClcServer._find_cpu(clc, module)
- params['memory'] = ClcServer._find_memory(clc, module)
- params['description'] = ClcServer._find_description(module)
- params['ttl'] = ClcServer._find_ttl(clc, module)
- params['template'] = ClcServer._find_template_id(module, datacenter)
- params['group'] = ClcServer._find_group(module, datacenter).id
- params['network_id'] = ClcServer._find_network_id(module, datacenter)
- params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
- clc,
- module)
- params['alert_policy_id'] = ClcServer._find_alert_policy_id(
- clc,
- module)
-
- return params
-
- @staticmethod
- def _find_datacenter(clc, module):
- """
- Find the datacenter by calling the CLC API.
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: clc-sdk.Datacenter instance
- """
- location = module.params.get('location')
- try:
- if not location:
- account = clc.v2.Account()
- location = account.data.get('primaryDataCenter')
- data_center = clc.v2.Datacenter(location)
- return data_center
- except CLCException:
- module.fail_json(msg="Unable to find location: {0}".format(location))
-
- @staticmethod
- def _find_alias(clc, module):
- """
- Find or Validate the Account Alias by calling the CLC API
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: clc-sdk.Account instance
- """
- alias = module.params.get('alias')
- if not alias:
- try:
- alias = clc.v2.Account.GetAlias()
- except CLCException as ex:
- module.fail_json(msg='Unable to find account alias. {0}'.format(
- ex.message
- ))
- return alias
-
- @staticmethod
- def _find_cpu(clc, module):
- """
- Find or validate the CPU value by calling the CLC API
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: Int value for CPU
- """
- cpu = module.params.get('cpu')
- group_id = module.params.get('group_id')
- alias = module.params.get('alias')
- state = module.params.get('state')
-
- if not cpu and state == 'present':
- group = clc.v2.Group(id=group_id,
- alias=alias)
- if group.Defaults("cpu"):
- cpu = group.Defaults("cpu")
- else:
- module.fail_json(
- msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
- return cpu
-
- @staticmethod
- def _find_memory(clc, module):
- """
- Find or validate the Memory value by calling the CLC API
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: Int value for Memory
- """
- memory = module.params.get('memory')
- group_id = module.params.get('group_id')
- alias = module.params.get('alias')
- state = module.params.get('state')
-
- if not memory and state == 'present':
- group = clc.v2.Group(id=group_id,
- alias=alias)
- if group.Defaults("memory"):
- memory = group.Defaults("memory")
- else:
- module.fail_json(msg=str(
- "Can\'t determine a default memory value. Please provide a value for memory."))
- return memory
-
- @staticmethod
- def _find_description(module):
- """
- Set the description module param to name if description is blank
- :param module: the module to validate
- :return: string description
- """
- description = module.params.get('description')
- if not description:
- description = module.params.get('name')
- return description
-
- @staticmethod
- def _validate_types(module):
- """
- Validate that type and storage_type are set appropriately, and fail if not
- :param module: the module to validate
- :return: none
- """
- state = module.params.get('state')
- server_type = module.params.get(
- 'type').lower() if module.params.get('type') else None
- storage_type = module.params.get(
- 'storage_type').lower() if module.params.get('storage_type') else None
-
- if state == "present":
- if server_type == "standard" and storage_type not in (
- "standard", "premium"):
- module.fail_json(
- msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
-
- if server_type == "hyperscale" and storage_type != "hyperscale":
- module.fail_json(
- msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
-
- @staticmethod
- def _validate_name(module):
- """
- Validate that name is the correct length if provided, fail if it is not
- :param module: the module to validate
- :return: none
- """
- server_name = module.params.get('name')
- state = module.params.get('state')
-
- if state == 'present' and (
- len(server_name) < 1 or len(server_name) > 6):
- module.fail_json(msg=str(
- "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
-
- @staticmethod
- def _find_ttl(clc, module):
- """
- Validate that TTL is > 3600 if set, and fail if not
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: validated ttl
- """
- ttl = module.params.get('ttl')
-
- if ttl:
- if ttl <= 3600:
- return module.fail_json(msg=str("Ttl cannot be <= 3600"))
- else:
- ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
- return ttl
-
- @staticmethod
- def _find_template_id(module, datacenter):
- """
- Find the template id by calling the CLC API.
- :param module: the module to validate
- :param datacenter: the datacenter to search for the template
- :return: a valid clc template id
- """
- lookup_template = module.params.get('template')
- state = module.params.get('state')
- type = module.params.get('type')
- result = None
-
- if state == 'present' and type != 'bareMetal':
- try:
- result = datacenter.Templates().Search(lookup_template)[0].id
- except CLCException:
- module.fail_json(
- msg=str(
- "Unable to find a template: " +
- lookup_template +
- " in location: " +
- datacenter.id))
- return result
-
- @staticmethod
- def _find_network_id(module, datacenter):
- """
- Validate the provided network id or return a default.
- :param module: the module to validate
- :param datacenter: the datacenter to search for a network id
- :return: a valid network id
- """
- network_id = module.params.get('network_id')
-
- if not network_id:
- try:
- network_id = datacenter.Networks().networks[0].id
- # -- added for clc-sdk 2.23 compatibility
- # datacenter_networks = clc_sdk.v2.Networks(
- # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
- # network_id = datacenter_networks.networks[0].id
- # -- end
- except CLCException:
- module.fail_json(
- msg=str(
- "Unable to find a network in location: " +
- datacenter.id))
-
- return network_id
-
- @staticmethod
- def _find_aa_policy_id(clc, module):
- """
- Validate if the anti affinity policy exist for the given name and throw error if not
- :param clc: the clc-sdk instance
- :param module: the module to validate
- :return: aa_policy_id: the anti affinity policy id of the given name.
- """
- aa_policy_id = module.params.get('anti_affinity_policy_id')
- aa_policy_name = module.params.get('anti_affinity_policy_name')
- if not aa_policy_id and aa_policy_name:
- alias = module.params.get('alias')
- aa_policy_id = ClcServer._get_anti_affinity_policy_id(
- clc,
- module,
- alias,
- aa_policy_name)
- if not aa_policy_id:
- module.fail_json(
- msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
- return aa_policy_id
-
- @staticmethod
- def _find_alert_policy_id(clc, module):
- """
- Validate if the alert policy exist for the given name and throw error if not
- :param clc: the clc-sdk instance
- :param module: the module to validate
- :return: alert_policy_id: the alert policy id of the given name.
- """
- alert_policy_id = module.params.get('alert_policy_id')
- alert_policy_name = module.params.get('alert_policy_name')
- if not alert_policy_id and alert_policy_name:
- alias = module.params.get('alias')
- alert_policy_id = ClcServer._get_alert_policy_id_by_name(
- clc=clc,
- module=module,
- alias=alias,
- alert_policy_name=alert_policy_name
- )
- if not alert_policy_id:
- module.fail_json(
- msg='No alert policy exist with name : %s' % alert_policy_name)
- return alert_policy_id
-
- def _create_servers(self, module, clc, override_count=None):
- """
- Create New Servers in CLC cloud
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :return: a list of dictionaries with server information about the servers that were created
- """
- p = module.params
- request_list = []
- servers = []
- server_dict_array = []
- created_server_ids = []
- partial_created_servers_ids = []
-
- add_public_ip = p.get('add_public_ip')
- public_ip_protocol = p.get('public_ip_protocol')
- public_ip_ports = p.get('public_ip_ports')
-
- params = {
- 'name': p.get('name'),
- 'template': p.get('template'),
- 'group_id': p.get('group'),
- 'network_id': p.get('network_id'),
- 'cpu': p.get('cpu'),
- 'memory': p.get('memory'),
- 'alias': p.get('alias'),
- 'password': p.get('password'),
- 'ip_address': p.get('ip_address'),
- 'storage_type': p.get('storage_type'),
- 'type': p.get('type'),
- 'primary_dns': p.get('primary_dns'),
- 'secondary_dns': p.get('secondary_dns'),
- 'additional_disks': p.get('additional_disks'),
- 'custom_fields': p.get('custom_fields'),
- 'ttl': p.get('ttl'),
- 'managed_os': p.get('managed_os'),
- 'description': p.get('description'),
- 'source_server_password': p.get('source_server_password'),
- 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
- 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
- 'packages': p.get('packages'),
- 'configuration_id': p.get('configuration_id'),
- 'os_type': p.get('os_type')
- }
-
- count = override_count if override_count else p.get('count')
-
- changed = False if count == 0 else True
-
- if not changed:
- return server_dict_array, created_server_ids, partial_created_servers_ids, changed
- for i in range(0, count):
- if not module.check_mode:
- req = self._create_clc_server(clc=clc,
- module=module,
- server_params=params)
- server = req.requests[0].Server()
- request_list.append(req)
- servers.append(server)
-
- self._wait_for_requests(module, request_list)
- self._refresh_servers(module, servers)
-
- ip_failed_servers = self._add_public_ip_to_servers(
- module=module,
- should_add_public_ip=add_public_ip,
- servers=servers,
- public_ip_protocol=public_ip_protocol,
- public_ip_ports=public_ip_ports)
- ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
- module=module,
- servers=servers)
-
- for server in servers:
- if server in ip_failed_servers or server in ap_failed_servers:
- partial_created_servers_ids.append(server.id)
- else:
- # reload server details
- server = clc.v2.Server(server.id)
- server.data['ipaddress'] = server.details[
- 'ipAddresses'][0]['internal']
-
- if add_public_ip and len(server.PublicIPs().public_ips) > 0:
- server.data['publicip'] = str(
- server.PublicIPs().public_ips[0])
- created_server_ids.append(server.id)
- server_dict_array.append(server.data)
-
- return server_dict_array, created_server_ids, partial_created_servers_ids, changed
-
- def _enforce_count(self, module, clc):
- """
- Enforce that there is the right number of servers in the provided group.
- Starts or stops servers as necessary.
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :return: a list of dictionaries with server information about the servers that were created or deleted
- """
- p = module.params
- changed = False
- count_group = p.get('count_group')
- datacenter = ClcServer._find_datacenter(clc, module)
- exact_count = p.get('exact_count')
- server_dict_array = []
- partial_servers_ids = []
- changed_server_ids = []
-
- # fail here if the exact count was specified without filtering
- # on a group, as this may lead to a undesired removal of instances
- if exact_count and count_group is None:
- return module.fail_json(
- msg="you must use the 'count_group' option with exact_count")
-
- servers, running_servers = ClcServer._find_running_servers_by_group(
- module, datacenter, count_group)
-
- if len(running_servers) == exact_count:
- changed = False
-
- elif len(running_servers) < exact_count:
- to_create = exact_count - len(running_servers)
- server_dict_array, changed_server_ids, partial_servers_ids, changed \
- = self._create_servers(module, clc, override_count=to_create)
-
- for server in server_dict_array:
- running_servers.append(server)
-
- elif len(running_servers) > exact_count:
- to_remove = len(running_servers) - exact_count
- all_server_ids = sorted([x.id for x in running_servers])
- remove_ids = all_server_ids[0:to_remove]
-
- (changed, server_dict_array, changed_server_ids) \
- = ClcServer._delete_servers(module, clc, remove_ids)
-
- return server_dict_array, changed_server_ids, partial_servers_ids, changed
-
- @staticmethod
- def _wait_for_requests(module, request_list):
- """
- Block until server provisioning requests are completed.
- :param module: the AnsibleModule object
- :param request_list: a list of clc-sdk.Request instances
- :return: none
- """
- wait = module.params.get('wait')
- if wait:
- # Requests.WaitUntilComplete() returns the count of failed requests
- failed_requests_count = sum(
- [request.WaitUntilComplete() for request in request_list])
-
- if failed_requests_count > 0:
- module.fail_json(
- msg='Unable to process server request')
-
- @staticmethod
- def _refresh_servers(module, servers):
- """
- Loop through a list of servers and refresh them.
- :param module: the AnsibleModule object
- :param servers: list of clc-sdk.Server instances to refresh
- :return: none
- """
- for server in servers:
- try:
- server.Refresh()
- except CLCException as ex:
- module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
- server.id, ex.message
- ))
-
- @staticmethod
- def _add_public_ip_to_servers(
- module,
- should_add_public_ip,
- servers,
- public_ip_protocol,
- public_ip_ports):
- """
- Create a public IP for servers
- :param module: the AnsibleModule object
- :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
- :param servers: List of servers to add public ips to
- :param public_ip_protocol: a protocol to allow for the public ips
- :param public_ip_ports: list of ports to allow for the public ips
- :return: none
- """
- failed_servers = []
- if not should_add_public_ip:
- return failed_servers
-
- ports_lst = []
- request_list = []
- server = None
-
- for port in public_ip_ports:
- ports_lst.append(
- {'protocol': public_ip_protocol, 'port': port})
- try:
- if not module.check_mode:
- for server in servers:
- request = server.PublicIPs().Add(ports_lst)
- request_list.append(request)
- except APIFailedResponse:
- failed_servers.append(server)
- ClcServer._wait_for_requests(module, request_list)
- return failed_servers
-
- @staticmethod
- def _add_alert_policy_to_servers(clc, module, servers):
- """
- Associate the alert policy to servers
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param servers: List of servers to add alert policy to
- :return: failed_servers: the list of servers which failed while associating alert policy
- """
- failed_servers = []
- p = module.params
- alert_policy_id = p.get('alert_policy_id')
- alias = p.get('alias')
-
- if alert_policy_id and not module.check_mode:
- for server in servers:
- try:
- ClcServer._add_alert_policy_to_server(
- clc=clc,
- alias=alias,
- server_id=server.id,
- alert_policy_id=alert_policy_id)
- except CLCException:
- failed_servers.append(server)
- return failed_servers
-
- @staticmethod
- def _add_alert_policy_to_server(
- clc, alias, server_id, alert_policy_id):
- """
- Associate an alert policy to a clc server
- :param clc: the clc-sdk instance to use
- :param alias: the clc account alias
- :param server_id: The clc server id
- :param alert_policy_id: the alert policy id to be associated to the server
- :return: none
- """
- try:
- clc.v2.API.Call(
- method='POST',
- url='servers/%s/%s/alertPolicies' % (alias, server_id),
- payload=json.dumps(
- {
- 'id': alert_policy_id
- }))
- except APIFailedResponse as e:
- raise CLCException(
- 'Failed to associate alert policy to the server : {0} with Error {1}'.format(
- server_id, str(e.response_text)))
-
- @staticmethod
- def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
- """
- Returns the alert policy id for the given alert policy name
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the clc account alias
- :param alert_policy_name: the name of the alert policy
- :return: alert_policy_id: the alert policy id
- """
- alert_policy_id = None
- policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
- if not policies:
- return alert_policy_id
- for policy in policies.get('items'):
- if policy.get('name') == alert_policy_name:
- if not alert_policy_id:
- alert_policy_id = policy.get('id')
- else:
- return module.fail_json(
- msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
- return alert_policy_id
-
- @staticmethod
- def _delete_servers(module, clc, server_ids):
- """
- Delete the servers on the provided list
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :param server_ids: list of servers to delete
- :return: a list of dictionaries with server information about the servers that were deleted
- """
- terminated_server_ids = []
- server_dict_array = []
- request_list = []
-
- if not isinstance(server_ids, list) or len(server_ids) < 1:
- return module.fail_json(
- msg='server_ids should be a list of servers, aborting')
-
- servers = clc.v2.Servers(server_ids).Servers()
- for server in servers:
- if not module.check_mode:
- request_list.append(server.Delete())
- ClcServer._wait_for_requests(module, request_list)
-
- for server in servers:
- terminated_server_ids.append(server.id)
-
- return True, server_dict_array, terminated_server_ids
-
- @staticmethod
- def _start_stop_servers(module, clc, server_ids):
- """
- Start or Stop the servers on the provided list
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :param server_ids: list of servers to start or stop
- :return: a list of dictionaries with server information about the servers that were started or stopped
- """
- p = module.params
- state = p.get('state')
- changed = False
- changed_servers = []
- server_dict_array = []
- result_server_ids = []
- request_list = []
-
- if not isinstance(server_ids, list) or len(server_ids) < 1:
- return module.fail_json(
- msg='server_ids should be a list of servers, aborting')
-
- servers = clc.v2.Servers(server_ids).Servers()
- for server in servers:
- if server.powerState != state:
- changed_servers.append(server)
- if not module.check_mode:
- request_list.append(
- ClcServer._change_server_power_state(
- module,
- server,
- state))
- changed = True
-
- ClcServer._wait_for_requests(module, request_list)
- ClcServer._refresh_servers(module, changed_servers)
-
- for server in set(changed_servers + servers):
- try:
- server.data['ipaddress'] = server.details[
- 'ipAddresses'][0]['internal']
- server.data['publicip'] = str(
- server.PublicIPs().public_ips[0])
- except (KeyError, IndexError):
- pass
-
- server_dict_array.append(server.data)
- result_server_ids.append(server.id)
-
- return changed, server_dict_array, result_server_ids
-
- @staticmethod
- def _change_server_power_state(module, server, state):
- """
- Change the server powerState
- :param module: the module to check for intended state
- :param server: the server to start or stop
- :param state: the intended powerState for the server
- :return: the request object from clc-sdk call
- """
- result = None
- try:
- if state == 'started':
- result = server.PowerOn()
- else:
- # Try to shut down the server and fall back to power off when unable to shut down.
- result = server.ShutDown()
- if result and hasattr(result, 'requests') and result.requests[0]:
- return result
- else:
- result = server.PowerOff()
- except CLCException:
- module.fail_json(
- msg='Unable to change power state for server {0}'.format(
- server.id))
- return result
-
- @staticmethod
- def _find_running_servers_by_group(module, datacenter, count_group):
- """
- Find a list of running servers in the provided group
- :param module: the AnsibleModule object
- :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
- :param count_group: the group to count the servers
- :return: list of servers, and list of running servers
- """
- group = ClcServer._find_group(
- module=module,
- datacenter=datacenter,
- lookup_group=count_group)
-
- servers = group.Servers().Servers()
- running_servers = []
-
- for server in servers:
- if server.status == 'active' and server.powerState == 'started':
- running_servers.append(server)
-
- return servers, running_servers
-
- @staticmethod
- def _find_group(module, datacenter, lookup_group=None):
- """
- Find a server group in a datacenter by calling the CLC API
- :param module: the AnsibleModule instance
- :param datacenter: clc-sdk.Datacenter instance to search for the group
- :param lookup_group: string name of the group to search for
- :return: clc-sdk.Group instance
- """
- if not lookup_group:
- lookup_group = module.params.get('group')
- try:
- return datacenter.Groups().Get(lookup_group)
- except CLCException:
- pass
-
- # The search above only acts on the main
- result = ClcServer._find_group_recursive(
- module,
- datacenter.Groups(),
- lookup_group)
-
- if result is None:
- module.fail_json(
- msg=str(
- "Unable to find group: " +
- lookup_group +
- " in location: " +
- datacenter.id))
-
- return result
-
- @staticmethod
- def _find_group_recursive(module, group_list, lookup_group):
- """
- Find a server group by recursively walking the tree
- :param module: the AnsibleModule instance to use
- :param group_list: a list of groups to search
- :param lookup_group: the group to look for
- :return: list of groups
- """
- result = None
- for group in group_list.groups:
- subgroups = group.Subgroups()
- try:
- return subgroups.Get(lookup_group)
- except CLCException:
- result = ClcServer._find_group_recursive(
- module,
- subgroups,
- lookup_group)
-
- if result is not None:
- break
-
- return result
-
- @staticmethod
- def _create_clc_server(
- clc,
- module,
- server_params):
- """
- Call the CLC Rest API to Create a Server
- :param clc: the clc-python-sdk instance to use
- :param module: the AnsibleModule instance to use
- :param server_params: a dictionary of params to use to create the servers
- :return: clc-sdk.Request object linked to the queued server request
- """
-
- try:
- res = clc.v2.API.Call(
- method='POST',
- url='servers/%s' %
- (server_params.get('alias')),
- payload=json.dumps(
- {
- 'name': server_params.get('name'),
- 'description': server_params.get('description'),
- 'groupId': server_params.get('group_id'),
- 'sourceServerId': server_params.get('template'),
- 'isManagedOS': server_params.get('managed_os'),
- 'primaryDNS': server_params.get('primary_dns'),
- 'secondaryDNS': server_params.get('secondary_dns'),
- 'networkId': server_params.get('network_id'),
- 'ipAddress': server_params.get('ip_address'),
- 'password': server_params.get('password'),
- 'sourceServerPassword': server_params.get('source_server_password'),
- 'cpu': server_params.get('cpu'),
- 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
- 'memoryGB': server_params.get('memory'),
- 'type': server_params.get('type'),
- 'storageType': server_params.get('storage_type'),
- 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
- 'customFields': server_params.get('custom_fields'),
- 'additionalDisks': server_params.get('additional_disks'),
- 'ttl': server_params.get('ttl'),
- 'packages': server_params.get('packages'),
- 'configurationId': server_params.get('configuration_id'),
- 'osType': server_params.get('os_type')}))
-
- result = clc.v2.Requests(res)
- except APIFailedResponse as ex:
- return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
- server_params.get('name'),
- ex.response_text
- ))
-
- #
- # Patch the Request object so that it returns a valid server
-
- # Find the server's UUID from the API response
- server_uuid = [obj['id']
- for obj in res['links'] if obj['rel'] == 'self'][0]
-
- # Change the request server method to a _find_server_by_uuid closure so
- # that it will work
- result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
- clc,
- module,
- server_uuid,
- server_params.get('alias'))
-
- return result
-
- @staticmethod
- def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
- """
- retrieves the anti affinity policy id of the server based on the name of the policy
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param aa_policy_name: the anti affinity policy name
- :return: aa_policy_id: The anti affinity policy id
- """
- aa_policy_id = None
- try:
- aa_policies = clc.v2.API.Call(method='GET',
- url='antiAffinityPolicies/%s' % alias)
- except APIFailedResponse as ex:
- return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
- alias, ex.response_text))
- for aa_policy in aa_policies.get('items'):
- if aa_policy.get('name') == aa_policy_name:
- if not aa_policy_id:
- aa_policy_id = aa_policy.get('id')
- else:
- return module.fail_json(
- msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
- return aa_policy_id
-
- #
- # This is the function that gets patched to the Request.server object using a lambda closure
- #
-
- @staticmethod
- def _find_server_by_uuid_w_retry(
- clc, module, svr_uuid, alias=None, retries=5, back_out=2):
- """
- Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param svr_uuid: UUID of the server
- :param retries: the number of retry attempts to make prior to fail. default is 5
- :param alias: the Account Alias to search
- :return: a clc-sdk.Server instance
- """
- if not alias:
- alias = clc.v2.Account.GetAlias()
-
- # Wait and retry if the api returns a 404
- while True:
- retries -= 1
- try:
- server_obj = clc.v2.API.Call(
- method='GET', url='servers/%s/%s?uuid=true' %
- (alias, svr_uuid))
- server_id = server_obj['id']
- server = clc.v2.Server(
- id=server_id,
- alias=alias,
- server_obj=server_obj)
- return server
-
- except APIFailedResponse as e:
- if e.response_status_code != 404:
- return module.fail_json(
- msg='A failure response was received from CLC API when '
- 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
- (svr_uuid, e.response_status_code, e.message))
- if retries == 0:
- return module.fail_json(
- msg='Unable to reach the CLC API after 5 attempts')
- time.sleep(back_out)
- back_out *= 2
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- argument_dict = ClcServer._define_module_argument_spec()
- module = AnsibleModule(supports_check_mode=True, **argument_dict)
- clc_server = ClcServer(module)
- clc_server.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/clc_server_snapshot.py b/plugins/modules/clc_server_snapshot.py
deleted file mode 100644
index 2550d0d936..0000000000
--- a/plugins/modules/clc_server_snapshot.py
+++ /dev/null
@@ -1,409 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: clc_server_snapshot
-short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud
-description:
- - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
-deprecated:
- removed_in: 11.0.0
- why: >
- Lumen Public Cloud (formerly known as CenturyLink Cloud) has gone End-of-Life in September 2023.
- See more at U(https://www.ctl.io/knowledge-base/release-notes/2023/lumen-public-cloud-platform-end-of-life-notice/?).
- alternative: There is none.
-extends_documentation_fragment:
- - community.general.attributes
- - community.general.clc
-author:
- - "CLC Runner (@clc-runner)"
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- server_ids:
- description:
- - The list of CLC server IDs.
- type: list
- required: true
- elements: str
- expiration_days:
- description:
- - The number of days to keep the server snapshot before it expires.
- type: int
- default: 7
- required: false
- state:
- description:
- - The state to insure that the provided resources are in.
- type: str
- default: 'present'
- required: false
- choices: ['present', 'absent', 'restore']
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- default: 'True'
- required: false
- type: str
-"""
-
-EXAMPLES = r"""
-# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
-
-- name: Create server snapshot
- community.general.clc_server_snapshot:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- expiration_days: 10
- wait: true
- state: present
-
-- name: Restore server snapshot
- community.general.clc_server_snapshot:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- wait: true
- state: restore
-
-- name: Delete server snapshot
- community.general.clc_server_snapshot:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- wait: true
- state: absent
-"""
-
-RETURN = r"""
-server_ids:
- description: The list of server IDs that are changed.
- returned: success
- type: list
- sample: ["UC1TEST-SVR01", "UC1TEST-SVR02"]
-"""
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcSnapshot:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
- server_ids = p['server_ids']
- expiration_days = p['expiration_days']
- state = p['state']
- request_list = []
- changed = False
- changed_servers = []
-
- self._set_clc_credentials_from_env()
- if state == 'present':
- changed, request_list, changed_servers = self.ensure_server_snapshot_present(
- server_ids=server_ids,
- expiration_days=expiration_days)
- elif state == 'absent':
- changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
- server_ids=server_ids)
- elif state == 'restore':
- changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
- server_ids=server_ids)
-
- self._wait_for_requests_to_complete(request_list)
- return self.module.exit_json(
- changed=changed,
- server_ids=changed_servers)
-
- def ensure_server_snapshot_present(self, server_ids, expiration_days):
- """
- Ensures the given set of server_ids have the snapshots created
- :param server_ids: The list of server_ids to create the snapshot
- :param expiration_days: The number of days to keep the snapshot
- :return: (changed, request_list, changed_servers)
- changed: A flag indicating whether any change was made
- request_list: the list of clc request objects from CLC API call
- changed_servers: The list of servers ids that are modified
- """
- request_list = []
- changed = False
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.GetSnapshots()) == 0]
- for server in servers_to_change:
- changed = True
- if not self.module.check_mode:
- request = self._create_server_snapshot(server, expiration_days)
- request_list.append(request)
- changed_servers = [
- server.id for server in servers_to_change if server.id]
- return changed, request_list, changed_servers
-
- def _create_server_snapshot(self, server, expiration_days):
- """
- Create the snapshot for the CLC server
- :param server: the CLC server object
- :param expiration_days: The number of days to keep the snapshot
- :return: the create request object from CLC API Call
- """
- result = None
- try:
- result = server.CreateSnapshot(
- delete_existing=True,
- expiration_days=expiration_days)
- except CLCException as ex:
- self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def ensure_server_snapshot_absent(self, server_ids):
- """
- Ensures the given set of server_ids have the snapshots removed
- :param server_ids: The list of server_ids to delete the snapshot
- :return: (changed, request_list, changed_servers)
- changed: A flag indicating whether any change was made
- request_list: the list of clc request objects from CLC API call
- changed_servers: The list of servers ids that are modified
- """
- request_list = []
- changed = False
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.GetSnapshots()) > 0]
- for server in servers_to_change:
- changed = True
- if not self.module.check_mode:
- request = self._delete_server_snapshot(server)
- request_list.append(request)
- changed_servers = [
- server.id for server in servers_to_change if server.id]
- return changed, request_list, changed_servers
-
- def _delete_server_snapshot(self, server):
- """
- Delete snapshot for the CLC server
- :param server: the CLC server object
- :return: the delete snapshot request object from CLC API
- """
- result = None
- try:
- result = server.DeleteSnapshot()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def ensure_server_snapshot_restore(self, server_ids):
- """
- Ensures the given set of server_ids have the snapshots restored
- :param server_ids: The list of server_ids to delete the snapshot
- :return: (changed, request_list, changed_servers)
- changed: A flag indicating whether any change was made
- request_list: the list of clc request objects from CLC API call
- changed_servers: The list of servers ids that are modified
- """
- request_list = []
- changed = False
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.GetSnapshots()) > 0]
- for server in servers_to_change:
- changed = True
- if not self.module.check_mode:
- request = self._restore_server_snapshot(server)
- request_list.append(request)
- changed_servers = [
- server.id for server in servers_to_change if server.id]
- return changed, request_list, changed_servers
-
- def _restore_server_snapshot(self, server):
- """
- Restore snapshot for the CLC server
- :param server: the CLC server object
- :return: the restore snapshot request object from CLC API
- """
- result = None
- try:
- result = server.RestoreSnapshot()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def _wait_for_requests_to_complete(self, requests_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param requests_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in requests_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process server snapshot request')
-
- @staticmethod
- def define_argument_spec():
- """
- This function defines the dictionary object required for
- package module
- :return: the package dictionary object
- """
- argument_spec = dict(
- server_ids=dict(type='list', required=True, elements='str'),
- expiration_days=dict(default=7, type='int'),
- wait=dict(default=True),
- state=dict(
- default='present',
- choices=[
- 'present',
- 'absent',
- 'restore']),
- )
- return argument_spec
-
- def _get_servers_from_clc(self, server_list, message):
- """
- Internal function to fetch list of CLC server objects from a list of server ids
- :param server_list: The list of server ids
- :param message: The error message to throw in case of any error
- :return the list of CLC server objects
- """
- try:
- return self.clc.v2.Servers(server_list).servers
- except CLCException as ex:
- return self.module.fail_json(msg=message + ': %s' % ex)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- Main function
- :return: None
- """
- module = AnsibleModule(
- argument_spec=ClcSnapshot.define_argument_spec(),
- supports_check_mode=True
- )
- clc_snapshot = ClcSnapshot(module)
- clc_snapshot.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/cloud_init_data_facts.py b/plugins/modules/cloud_init_data_facts.py
index dd9825858e..544a663e5c 100644
--- a/plugins/modules/cloud_init_data_facts.py
+++ b/plugins/modules/cloud_init_data_facts.py
@@ -50,38 +50,41 @@ cloud_init_data_facts:
description: Facts of result and status.
returned: success
type: dict
- sample: '{
- "status": {
+ sample:
+ {
+ "status": {
"v1": {
- "datasource": "DataSourceCloudStack",
- "errors": []
- },
- "result": {
- "v1": {
- "datasource": "DataSourceCloudStack",
- "init": {
- "errors": [],
- "finished": 1522066377.0185432,
- "start": 1522066375.2648022
- },
- "init-local": {
- "errors": [],
- "finished": 1522066373.70919,
- "start": 1522066373.4726632
- },
- "modules-config": {
- "errors": [],
- "finished": 1522066380.9097016,
- "start": 1522066379.0011985
- },
- "modules-final": {
- "errors": [],
- "finished": 1522066383.56594,
- "start": 1522066382.3449218
- },
- "stage": null
+ "datasource": "DataSourceCloudStack",
+ "errors": []
}
- }'
+ },
+ "result": {
+ "v1": {
+ "datasource": "DataSourceCloudStack",
+ "init": {
+ "errors": [],
+ "finished": 1522066377.0185432,
+ "start": 1522066375.2648022
+ },
+ "init-local": {
+ "errors": [],
+ "finished": 1522066373.70919,
+ "start": 1522066373.4726632
+ },
+ "modules-config": {
+ "errors": [],
+ "finished": 1522066380.9097016,
+ "start": 1522066379.0011985
+ },
+ "modules-final": {
+ "errors": [],
+ "finished": 1522066383.56594,
+ "start": 1522066382.3449218
+ },
+ "stage": null
+ }
+ }
+ }
"""
import os
diff --git a/plugins/modules/cloudflare_dns.py b/plugins/modules/cloudflare_dns.py
index e1b75e30ca..fafca00b50 100644
--- a/plugins/modules/cloudflare_dns.py
+++ b/plugins/modules/cloudflare_dns.py
@@ -127,7 +127,7 @@ options:
description:
- Whether the record should be the only one for that record type and record name.
- Only use with O(state=present).
- - This will delete all other records with the same record name and type.
+ - This deletes all other records with the same record name and type.
type: bool
state:
description:
@@ -157,8 +157,9 @@ options:
- The type of DNS record to create. Required if O(state=present).
- Support for V(SPF) has been removed from community.general 9.0.0 since that record type is no longer supported by
CloudFlare.
+ - Support for V(PTR) has been added in community.general 11.1.0.
type: str
- choices: [A, AAAA, CNAME, DS, MX, NS, SRV, SSHFP, TLSA, CAA, TXT]
+ choices: [A, AAAA, CNAME, DS, MX, NS, SRV, SSHFP, TLSA, CAA, TXT, PTR]
value:
description:
- The record value.
@@ -311,6 +312,14 @@ EXAMPLES = r"""
algorithm: 8
hash_type: 2
value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB
+
+- name: Create PTR record "1.2.0.192.in-addr.arpa" with value "test.example.com"
+ community.general.cloudflare_dns:
+ zone: 2.0.192.in-addr.arpa
+ record: 1
+ type: PTR
+ value: test.example.com
+ state: present
"""
RETURN = r"""
@@ -345,8 +354,16 @@ record:
description: Additional record data.
returned: success, if type is SRV, DS, SSHFP TLSA or CAA
type: dict
- sample: {name: "jabber", port: 8080, priority: 10, proto: "_tcp", service: "_xmpp", target: "jabberhost.sample.com",
- weight: 5}
+ sample:
+ {
+ "name": "jabber",
+ "port": 8080,
+ "priority": 10,
+ "proto": "_tcp",
+ "service": "_xmpp",
+ "target": "jabberhost.sample.com",
+ "weight": 5
+ }
id:
description: The record ID.
returned: success
@@ -361,7 +378,7 @@ record:
description: Extra Cloudflare-specific information about the record.
returned: success
type: dict
- sample: {auto_added: false}
+ sample: {"auto_added": false}
modified_on:
description: Record modification date.
returned: success
@@ -392,7 +409,7 @@ record:
returned: success
type: list
elements: str
- sample: ['production', 'app']
+ sample: ["production", "app"]
version_added: 10.1.0
tags_modified_on:
description: When the record tags were last modified. Omitted if there are no tags.
@@ -431,9 +448,11 @@ from ansible.module_utils.urls import fetch_url
def lowercase_string(param):
- if not isinstance(param, str):
- return param
- return param.lower()
+ return param.lower() if isinstance(param, str) else param
+
+
+def join_str(sep, *args):
+ return sep.join([str(arg) for arg in args])
class CloudflareAPI(object):
@@ -479,29 +498,29 @@ class CloudflareAPI(object):
if (self.type == 'AAAA') and (self.value is not None):
self.value = self.value.lower()
- if (self.type == 'SRV'):
+ if self.type == 'SRV':
if (self.proto is not None) and (not self.proto.startswith('_')):
- self.proto = '_' + self.proto
+ self.proto = '_{0}'.format(self.proto)
if (self.service is not None) and (not self.service.startswith('_')):
- self.service = '_' + self.service
+ self.service = '_{0}'.format(self.service)
- if (self.type == 'TLSA'):
+ if self.type == 'TLSA':
if (self.proto is not None) and (not self.proto.startswith('_')):
- self.proto = '_' + self.proto
+ self.proto = '_{0}'.format(self.proto)
if (self.port is not None):
- self.port = '_' + str(self.port)
+ self.port = '_{0}'.format(self.port)
if not self.record.endswith(self.zone):
- self.record = self.record + '.' + self.zone
+ self.record = join_str('.', self.record, self.zone)
- if (self.type == 'DS'):
+ if self.type == 'DS':
if self.record == self.zone:
self.module.fail_json(msg="DS records only apply to subdomains.")
def _cf_simple_api_call(self, api_call, method='GET', payload=None):
if self.api_token:
headers = {
- 'Authorization': 'Bearer ' + self.api_token,
+ 'Authorization': 'Bearer {0}'.format(self.api_token),
'Content-Type': 'application/json',
}
else:
@@ -601,7 +620,7 @@ class CloudflareAPI(object):
else:
raw_api_call = api_call
while next_page <= pagination['total_pages']:
- raw_api_call += '?' + '&'.join(parameters)
+ raw_api_call += '?{0}'.format('&'.join(parameters))
result, status = self._cf_simple_api_call(raw_api_call, method, payload)
data += result['result']
next_page += 1
@@ -626,8 +645,8 @@ class CloudflareAPI(object):
name = self.zone
param = ''
if name:
- param = '?' + urlencode({'name': name})
- zones, status = self._cf_api_call('/zones' + param)
+ param = '?{0}'.format(urlencode({'name': name}))
+ zones, status = self._cf_api_call('/zones{0}'.format(param))
return zones
def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
@@ -652,48 +671,40 @@ class CloudflareAPI(object):
if value:
query['content'] = value
if query:
- api_call += '?' + urlencode(query)
+ api_call += '?{0}'.format(urlencode(query))
records, status = self._cf_api_call(api_call)
return records
- def delete_dns_records(self, **kwargs):
- params = {}
- for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone',
- 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag']:
- if param in kwargs:
- params[param] = kwargs[param]
- else:
- params[param] = getattr(self, param)
-
+ def delete_dns_records(self, solo):
records = []
- content = params['value']
- search_record = params['record']
- if params['type'] == 'SRV':
- if not (params['value'] is None or params['value'] == ''):
- content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
- search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
- elif params['type'] == 'DS':
- if not (params['value'] is None or params['value'] == ''):
- content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
- elif params['type'] == 'SSHFP':
- if not (params['value'] is None or params['value'] == ''):
- content = str(params['algorithm']) + ' ' + str(params['hash_type']) + ' ' + params['value'].upper()
- elif params['type'] == 'TLSA':
- if not (params['value'] is None or params['value'] == ''):
- content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
- search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
- if params['solo']:
+ content = self.value
+ search_record = self.record
+ if self.type == 'SRV':
+ if not (self.value is None or self.value == ''):
+ content = join_str('\t', self.weight, self.port, self.value)
+ search_record = join_str('.', self.service, self.proto, self.record)
+ elif self.type == 'DS':
+ if not (self.value is None or self.value == ''):
+ content = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value)
+ elif self.type == 'SSHFP':
+ if not (self.value is None or self.value == ''):
+ content = join_str(' ', self.algorithm, self.hash_type, self.value.upper())
+ elif self.type == 'TLSA':
+ if not (self.value is None or self.value == ''):
+ content = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value)
+ search_record = join_str('.', self.port, self.proto, self.record)
+ if solo:
search_value = None
else:
search_value = content
- zone_id = self._get_zone_id(params['zone'])
- records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+ zone_id = self._get_zone_id(self.zone)
+ records = self.get_dns_records(self.zone, self.type, search_record, search_value)
for rr in records:
- if params['solo']:
- if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)):
+ if solo:
+ if not ((rr['type'] == self.type) and (rr['name'] == search_record) and (rr['content'] == content)):
self.changed = True
if not self.module.check_mode:
result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE')
@@ -703,156 +714,146 @@ class CloudflareAPI(object):
result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE')
return self.changed
- def ensure_dns_record(self, **kwargs):
- params = {}
- for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone',
- 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag', 'flag', 'tag', 'tags', 'comment']:
- if param in kwargs:
- params[param] = kwargs[param]
- else:
- params[param] = getattr(self, param)
-
- search_value = params['value']
- search_record = params['record']
+ def ensure_dns_record(self):
+ search_value = self.value
+ search_record = self.record
new_record = None
- if (params['type'] is None) or (params['record'] is None):
- self.module.fail_json(msg="You must provide a type and a record to create a new record")
- if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS']):
- if not params['value']:
+ if self.type in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'PTR']:
+ if not self.value:
self.module.fail_json(msg="You must provide a non-empty value to create this record type")
# there can only be one CNAME per record
# ignoring the value when searching for existing
# CNAME records allows us to update the value if it
# changes
- if params['type'] == 'CNAME':
+ if self.type == 'CNAME':
search_value = None
new_record = {
- "type": params['type'],
- "name": params['record'],
- "content": params['value'],
- "ttl": params['ttl']
+ "type": self.type,
+ "name": self.record,
+ "content": self.value,
+ "ttl": self.ttl
}
- if (params['type'] in ['A', 'AAAA', 'CNAME']):
- new_record["proxied"] = params["proxied"]
+ if self.type in ['A', 'AAAA', 'CNAME']:
+ new_record["proxied"] = self.proxied
- if params['type'] == 'MX':
- for attr in [params['priority'], params['value']]:
+ if self.type == 'MX':
+ for attr in [self.priority, self.value]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide priority and a value to create this record type")
new_record = {
- "type": params['type'],
- "name": params['record'],
- "content": params['value'],
- "priority": params['priority'],
- "ttl": params['ttl']
+ "type": self.type,
+ "name": self.record,
+ "content": self.value,
+ "priority": self.priority,
+ "ttl": self.ttl
}
- if params['type'] == 'SRV':
- for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
+ if self.type == 'SRV':
+ for attr in [self.port, self.priority, self.proto, self.service, self.weight, self.value]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
srv_data = {
- "target": params['value'],
- "port": params['port'],
- "weight": params['weight'],
- "priority": params['priority'],
+ "target": self.value,
+ "port": self.port,
+ "weight": self.weight,
+ "priority": self.priority,
}
new_record = {
- "type": params['type'],
- "name": params['service'] + '.' + params['proto'] + '.' + params['record'],
- "ttl": params['ttl'],
+ "type": self.type,
+ "name": join_str('.', self.service, self.proto, self.record),
+ "ttl": self.ttl,
'data': srv_data,
}
- search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
- search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+ search_value = join_str('\t', self.weight, self.port, self.value)
+ search_record = join_str('.', self.service, self.proto, self.record)
- if params['type'] == 'DS':
- for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]:
+ if self.type == 'DS':
+ for attr in [self.key_tag, self.algorithm, self.hash_type, self.value]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type")
ds_data = {
- "key_tag": params['key_tag'],
- "algorithm": params['algorithm'],
- "digest_type": params['hash_type'],
- "digest": params['value'],
+ "key_tag": self.key_tag,
+ "algorithm": self.algorithm,
+ "digest_type": self.hash_type,
+ "digest": self.value,
}
new_record = {
- "type": params['type'],
- "name": params['record'],
+ "type": self.type,
+ "name": self.record,
'data': ds_data,
- "ttl": params['ttl'],
+ "ttl": self.ttl,
}
- search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ search_value = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value)
- if params['type'] == 'SSHFP':
- for attr in [params['algorithm'], params['hash_type'], params['value']]:
+ if self.type == 'SSHFP':
+ for attr in [self.algorithm, self.hash_type, self.value]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type")
sshfp_data = {
- "fingerprint": params['value'].upper(),
- "type": params['hash_type'],
- "algorithm": params['algorithm'],
+ "fingerprint": self.value.upper(),
+ "type": self.hash_type,
+ "algorithm": self.algorithm,
}
new_record = {
- "type": params['type'],
- "name": params['record'],
+ "type": self.type,
+ "name": self.record,
'data': sshfp_data,
- "ttl": params['ttl'],
+ "ttl": self.ttl,
}
- search_value = str(params['algorithm']) + ' ' + str(params['hash_type']) + ' ' + params['value']
+ search_value = join_str(' ', self.algorithm, self.hash_type, self.value)
- if params['type'] == 'TLSA':
- for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]:
+ if self.type == 'TLSA':
+ for attr in [self.port, self.proto, self.cert_usage, self.selector, self.hash_type, self.value]:
if (attr is None) or (attr == ''):
self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type")
- search_record = params['port'] + '.' + params['proto'] + '.' + params['record']
+ search_record = join_str('.', self.port, self.proto, self.record)
tlsa_data = {
- "usage": params['cert_usage'],
- "selector": params['selector'],
- "matching_type": params['hash_type'],
- "certificate": params['value'],
+ "usage": self.cert_usage,
+ "selector": self.selector,
+ "matching_type": self.hash_type,
+ "certificate": self.value,
}
new_record = {
- "type": params['type'],
+ "type": self.type,
"name": search_record,
'data': tlsa_data,
- "ttl": params['ttl'],
+ "ttl": self.ttl,
}
- search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ search_value = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value)
- if params['type'] == 'CAA':
- for attr in [params['flag'], params['tag'], params['value']]:
- if (attr is None) or (attr == ''):
+ if self.type == 'CAA':
+ for attr in [self.flag, self.tag, self.value]:
+ if attr == '':
self.module.fail_json(msg="You must provide flag, tag and a value to create this record type")
caa_data = {
- "flags": params['flag'],
- "tag": params['tag'],
- "value": params['value'],
+ "flags": self.flag,
+ "tag": self.tag,
+ "value": self.value,
}
new_record = {
- "type": params['type'],
- "name": params['record'],
+ "type": self.type,
+ "name": self.record,
'data': caa_data,
- "ttl": params['ttl'],
+ "ttl": self.ttl,
}
search_value = None
- new_record['comment'] = params['comment'] or None
- new_record['tags'] = params['tags'] or []
+ new_record['comment'] = self.comment or None
+ new_record['tags'] = self.tags or []
- zone_id = self._get_zone_id(params['zone'])
- records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+ zone_id = self._get_zone_id(self.zone)
+ records = self.get_dns_records(self.zone, self.type, search_record, search_value)
# in theory this should be impossible as cloudflare does not allow
# the creation of duplicate records but lets cover it anyways
if len(records) > 1:
# As Cloudflare API cannot filter record containing quotes
# CAA records must be compared locally
- if params['type'] == 'CAA':
+ if self.type == 'CAA':
for rr in records:
if rr['data']['flags'] == caa_data['flags'] and rr['data']['tag'] == caa_data['tag'] and rr['data']['value'] == caa_data['value']:
return rr, self.changed
@@ -862,16 +863,16 @@ class CloudflareAPI(object):
if len(records) == 1:
cur_record = records[0]
do_update = False
- if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
+ if (self.ttl is not None) and (cur_record['ttl'] != self.ttl):
do_update = True
- if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
+ if (self.priority is not None) and ('priority' in cur_record) and (cur_record['priority'] != self.priority):
do_update = True
- if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']):
+ if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != self.proxied):
do_update = True
if ('data' in new_record) and ('data' in cur_record):
- if (cur_record['data'] != new_record['data']):
+ if cur_record['data'] != new_record['data']:
do_update = True
- if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
+ if (self.type == 'CNAME') and (cur_record['content'] != new_record['content']):
do_update = True
if cur_record['comment'] != new_record['comment']:
do_update = True
@@ -897,14 +898,9 @@ class CloudflareAPI(object):
def main():
module = AnsibleModule(
argument_spec=dict(
- api_token=dict(
- type="str",
- required=False,
- no_log=True,
- fallback=(env_fallback, ["CLOUDFLARE_TOKEN"]),
- ),
- account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']),
- account_email=dict(type='str', required=False),
+ api_token=dict(type="str", no_log=True, fallback=(env_fallback, ["CLOUDFLARE_TOKEN"])),
+ account_api_key=dict(type='str', no_log=True, aliases=['account_api_token']),
+ account_email=dict(type='str'),
algorithm=dict(type='int'),
cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
comment=dict(type='str'),
@@ -924,7 +920,7 @@ def main():
state=dict(type='str', default='present', choices=['absent', 'present']),
timeout=dict(type='int', default=30),
ttl=dict(type='int', default=1),
- type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT']),
+ type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT', 'PTR']),
value=dict(type='str', aliases=['content']),
weight=dict(type='int', default=1),
zone=dict(type='str', required=True, aliases=['domain']),
@@ -933,20 +929,21 @@ def main():
required_if=[
('state', 'present', ['record', 'type', 'value']),
('state', 'absent', ['record']),
- ('type', 'SRV', ['proto', 'service']),
+ ('type', 'SRV', ['proto', 'service', 'value']),
('type', 'TLSA', ['proto', 'port']),
- ('type', 'CAA', ['flag', 'tag']),
+ ('type', 'CAA', ['flag', 'tag', 'value']),
+ ],
+ required_together=[
+ ('account_api_key', 'account_email'),
+ ],
+ required_one_of=[
+ ['api_token', 'account_api_key'],
],
)
- if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']):
- module.fail_json(msg="Either api_token or account_api_key and account_email params are required.")
if module.params['type'] == 'SRV':
- if not ((module.params['weight'] is not None and module.params['port'] is not None
- and not (module.params['value'] is None or module.params['value'] == ''))
- or (module.params['weight'] is None and module.params['port'] is None
- and (module.params['value'] is None or module.params['value'] == ''))):
- module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
+ if module.params['value'] == '':
+ module.fail_json(msg="For SRV records the params weight, port and value all need to be defined.")
if module.params['type'] == 'SSHFP':
if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None
@@ -963,11 +960,8 @@ def main():
module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")
if module.params['type'] == 'CAA':
- if not ((module.params['flag'] is not None and module.params['tag'] is not None
- and not (module.params['value'] is None or module.params['value'] == ''))
- or (module.params['flag'] is None and module.params['tag'] is None
- and (module.params['value'] is None or module.params['value'] == ''))):
- module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined, or not at all.")
+ if module.params['value'] == '':
+ module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined.")
if module.params['type'] == 'DS':
if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
diff --git a/plugins/modules/cobbler_sync.py b/plugins/modules/cobbler_sync.py
index 95a3241b98..b1c92a1690 100644
--- a/plugins/modules/cobbler_sync.py
+++ b/plugins/modules/cobbler_sync.py
@@ -42,12 +42,12 @@ options:
type: str
use_ssl:
description:
- - If V(false), an HTTP connection will be used instead of the default HTTPS connection.
+ - If V(false), an HTTP connection is used instead of the default HTTPS connection.
type: bool
default: true
validate_certs:
description:
- - If V(false), SSL certificates will not be validated.
+ - If V(false), SSL certificates are not validated.
- This should only set to V(false) when used on personally controlled sites using self-signed certificates.
type: bool
default: true
diff --git a/plugins/modules/cobbler_system.py b/plugins/modules/cobbler_system.py
index fd1db6bf3e..a1a400928e 100644
--- a/plugins/modules/cobbler_system.py
+++ b/plugins/modules/cobbler_system.py
@@ -42,12 +42,12 @@ options:
type: str
use_ssl:
description:
- - If V(false), an HTTP connection will be used instead of the default HTTPS connection.
+ - If V(false), an HTTP connection is used instead of the default HTTPS connection.
type: bool
default: true
validate_certs:
description:
- - If V(false), SSL certificates will not be validated.
+ - If V(false), SSL certificates are not validated.
- This should only set to V(false) when used on personally controlled sites using self-signed certificates.
type: bool
default: true
@@ -161,6 +161,7 @@ from ansible.module_utils.common.text.converters import to_text
from ansible_collections.community.general.plugins.module_utils.datetime import (
now,
)
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
IFPROPS_MAPPING = dict(
bondingopts='bonding_opts',
@@ -278,7 +279,11 @@ def main():
if system:
# Update existing entry
- system_id = conn.get_system_handle(name, token)
+ system_id = ''
+ if LooseVersion(str(conn.version())) >= LooseVersion('3.4'):
+ system_id = conn.get_system_handle(name)
+ else:
+ system_id = conn.get_system_handle(name, token)
for key, value in iteritems(module.params['properties']):
if key not in system:
diff --git a/plugins/modules/composer.py b/plugins/modules/composer.py
index 6c935bfe75..735b4d2d36 100644
--- a/plugins/modules/composer.py
+++ b/plugins/modules/composer.py
@@ -17,7 +17,7 @@ author:
short_description: Dependency Manager for PHP
description:
- Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs
- and it will install them in your project for you.
+ and it installs them in your project for you.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -45,7 +45,7 @@ options:
type: path
description:
- Directory of your project (see C(--working-dir)). This is required when the command is not run globally.
- - Will be ignored if O(global_command=true).
+ - This is ignored if O(global_command=true).
global_command:
description:
- Runs the specified command globally.
diff --git a/plugins/modules/consul.py b/plugins/modules/consul.py
index 645ffe5bbd..9c36ba65f2 100644
--- a/plugins/modules/consul.py
+++ b/plugins/modules/consul.py
@@ -21,8 +21,8 @@ description:
name and ID respectively by appending V(service:) Node level checks require a O(check_name) and optionally a O(check_id).
- Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without this
metadata it is not possible to tell if the data supplied with ansible represents a change to a check. As a result this
- does not attempt to determine changes and will always report a changed occurred. An API method is planned to supply this
- metadata so at that stage change management will be added.
+ does not attempt to determine changes and it always reports that a change occurred. An API method is planned to supply this
+ metadata so at that stage change management is to be added.
- See U(http://consul.io) for more details.
requirements:
- python-consul
@@ -83,25 +83,25 @@ options:
service_address:
type: str
description:
- - The address to advertise that the service will be listening on. This value will be passed as the C(address) parameter
- to Consul's C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details.
+ - The address to advertise that the service is listening on. This value is passed as the C(address) parameter to Consul's
+ C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details.
tags:
type: list
elements: str
description:
- - Tags that will be attached to the service registration.
+ - Tags that are attached to the service registration.
script:
type: str
description:
- - The script/command that will be run periodically to check the health of the service.
+ - The script/command that is run periodically to check the health of the service.
- Requires O(interval) to be provided.
- Mutually exclusive with O(ttl), O(tcp) and O(http).
interval:
type: str
description:
- - The interval at which the service check will be run. This is a number with a V(s) or V(m) suffix to signify the units
- of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) will be used by default, for example
- V(10) will be V(10s).
+ - The interval at which the service check is run. This is a number with a V(s) or V(m) suffix to signify the units of
+ seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for example V(10)
+ is V(10s).
- Required if one of the parameters O(script), O(http), or O(tcp) is specified.
check_id:
type: str
@@ -122,25 +122,25 @@ options:
ttl:
type: str
description:
- - Checks can be registered with a TTL instead of a O(script) and O(interval) this means that the service will check
- in with the agent before the TTL expires. If it does not the check will be considered failed. Required if registering
- a check and the script an interval are missing Similar to the interval this is a number with a V(s) or V(m) suffix
- to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) will be used
- by default, for example V(10) will be V(10s).
+ - Checks can be registered with a TTL instead of a O(script) and O(interval) this means that the service checks in with
+ the agent before the TTL expires. If it does not the check is considered failed. Required if registering a check and
+ the script and interval are missing. Similar to the interval this is a number with a V(s) or V(m) suffix to signify
+ the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for
+ example V(10) is equivalent to V(10s).
- Mutually exclusive with O(script), O(tcp) and O(http).
tcp:
type: str
description:
- - Checks can be registered with a TCP port. This means that Consul will check if the connection attempt to that port
- is successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80).
+ - Checks can be registered with a TCP port. This means that Consul checks if the connection attempt to that port is
+ successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80).
- Requires O(interval) to be provided.
- Mutually exclusive with O(script), O(ttl) and O(http).
version_added: '1.3.0'
http:
type: str
description:
- - Checks can be registered with an HTTP endpoint. This means that Consul will check that the http endpoint returns a
- successful HTTP status.
+ - Checks can be registered with an HTTP endpoint. This means that Consul checks that the http endpoint returns a successful
+ HTTP status.
- Requires O(interval) to be provided.
- Mutually exclusive with O(script), O(ttl) and O(tcp).
timeout:
@@ -148,7 +148,7 @@ options:
description:
- A custom HTTP check timeout. The Consul default is 10 seconds. Similar to the interval this is a number with a V(s)
or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s)
- will be used by default, for example V(10) will be V(10s).
+ is used by default, for example V(10) is equivalent to V(10s).
token:
type: str
description:
diff --git a/plugins/modules/consul_acl_bootstrap.py b/plugins/modules/consul_acl_bootstrap.py
index 7002c3d549..ba6adf2dd3 100644
--- a/plugins/modules/consul_acl_bootstrap.py
+++ b/plugins/modules/consul_acl_bootstrap.py
@@ -50,7 +50,7 @@ RETURN = r"""
result:
description:
- The bootstrap result as returned by the Consul HTTP API.
- - B(Note:) If O(bootstrap_secret) has been specified the C(SecretID) and C(ID) will not contain the secret but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
+ - B(Note:) If O(bootstrap_secret) has been specified the C(SecretID) and C(ID) do not contain the secret but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
If you pass O(bootstrap_secret), make sure your playbook/role does not depend on this return value!
returned: changed
type: dict
diff --git a/plugins/modules/consul_agent_check.py b/plugins/modules/consul_agent_check.py
index ca1639063c..51d9715e88 100644
--- a/plugins/modules/consul_agent_check.py
+++ b/plugins/modules/consul_agent_check.py
@@ -17,9 +17,9 @@ description:
- Allows the addition, modification and deletion of checks in a Consul cluster using the agent. For more details on using
and configuring Checks, see U(https://developer.hashicorp.com/consul/api-docs/agent/check).
- Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without this
- metadata it is not possible to tell if the data supplied with ansible represents a change to a check. As a result this
- does not attempt to determine changes and will always report a changed occurred. An API method is planned to supply this
- metadata so at that stage change management will be added.
+ metadata it is not possible to tell if the data supplied with ansible represents a change to a check. As a result, the
+ module does not attempt to determine changes and it always reports that a change occurred. An API method is planned to supply
+ this metadata so at that stage change management is to be added.
author:
- Michael Ilg (@Ilgmi)
extends_documentation_fragment:
@@ -36,8 +36,8 @@ attributes:
diff_mode:
support: partial
details:
- - In check mode the diff will show the object as it is defined in the module options and not the object structure of
- the Consul API.
+ - In check mode the diff shows the object as it is defined in the module options and not the object structure of the
+ Consul API.
options:
state:
description:
@@ -52,13 +52,13 @@ options:
id:
description:
- Specifies a unique ID for this check on the node. This defaults to the O(name) parameter, but it may be necessary
- to provide an ID for uniqueness. This value will return in the response as "CheckId".
+ to provide an ID for uniqueness. This value is returned in the response as V(CheckId).
type: str
interval:
description:
- - The interval at which the service check will be run. This is a number with a V(s) or V(m) suffix to signify the units
- of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) will be used by default, for example
- V(10) will be V(10s).
+ - The interval at which the service check is run. This is a number with a V(s) or V(m) suffix to signify the units of
+ seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for example V(10)
+ is equivalent to V(10s).
- Required if one of the parameters O(args), O(http), or O(tcp) is specified.
type: str
notes:
@@ -74,11 +74,11 @@ options:
elements: str
ttl:
description:
- - Checks can be registered with a TTL instead of a O(args) and O(interval) this means that the service will check in
- with the agent before the TTL expires. If it does not the check will be considered failed. Required if registering
- a check and the script an interval are missing Similar to the interval this is a number with a V(s) or V(m) suffix
- to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) will be used
- by default, for example V(10) will be V(10s).
+ - Checks can be registered with a TTL instead of a O(args) and O(interval) this means that the service checks in with
+ the agent before the TTL expires. If it does not the check is considered failed. Required if registering a check and
+ the script and interval are missing. Similar to the interval this is a number with a V(s) or V(m) suffix to signify
+ the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for
+ example V(10) is equivalent to V(10s).
- Mutually exclusive with O(args), O(tcp) and O(http).
type: str
tcp:
@@ -91,8 +91,8 @@ options:
version_added: '1.3.0'
http:
description:
- - Checks can be registered with an HTTP endpoint. This means that Consul will check that the http endpoint returns a
- successful HTTP status.
+ - Checks can be registered with an HTTP endpoint. This means that Consul checks that the HTTP endpoint returns a successful
+ HTTP status.
- Requires O(interval) to be provided.
- Mutually exclusive with O(args), O(ttl) and O(tcp).
type: str
@@ -100,7 +100,7 @@ options:
description:
- A custom HTTP check timeout. The Consul default is 10 seconds. Similar to the interval this is a number with a V(s)
or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s)
- will be used by default, for example V(10) will be V(10s).
+ is used by default, for example V(10) is equivalent to V(10s).
type: str
service_id:
description:
diff --git a/plugins/modules/consul_agent_service.py b/plugins/modules/consul_agent_service.py
index bd28dfd2c3..882e45dceb 100644
--- a/plugins/modules/consul_agent_service.py
+++ b/plugins/modules/consul_agent_service.py
@@ -31,7 +31,7 @@ attributes:
diff_mode:
support: partial
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
options:
state:
description:
@@ -50,13 +50,13 @@ options:
type: str
tags:
description:
- - Tags that will be attached to the service registration.
+ - Tags that are attached to the service registration.
type: list
elements: str
address:
description:
- - The address to advertise that the service will be listening on. This value will be passed as the C(address) parameter
- to Consul's C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details.
+ - The address to advertise that the service listens on. This value is passed as the C(address) parameter to Consul's
+ C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details.
type: str
meta:
description:
diff --git a/plugins/modules/consul_auth_method.py b/plugins/modules/consul_auth_method.py
index a5cfd3b305..4658f906e3 100644
--- a/plugins/modules/consul_auth_method.py
+++ b/plugins/modules/consul_auth_method.py
@@ -29,7 +29,7 @@ attributes:
diff_mode:
support: partial
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
options:
state:
description:
@@ -71,7 +71,7 @@ options:
config:
description:
- The raw configuration to use for the chosen auth method.
- - Contents will vary depending upon the type chosen.
+ - Contents vary depending upon the O(type) chosen.
- Required when the auth method is created.
type: dict
"""
diff --git a/plugins/modules/consul_binding_rule.py b/plugins/modules/consul_binding_rule.py
index 698ba5913f..0a4531fdf7 100644
--- a/plugins/modules/consul_binding_rule.py
+++ b/plugins/modules/consul_binding_rule.py
@@ -29,7 +29,7 @@ attributes:
diff_mode:
support: partial
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
options:
state:
description:
diff --git a/plugins/modules/consul_kv.py b/plugins/modules/consul_kv.py
index 8152dd5c25..2987e71a86 100644
--- a/plugins/modules/consul_kv.py
+++ b/plugins/modules/consul_kv.py
@@ -36,12 +36,12 @@ options:
state:
description:
- The action to take with the supplied key and value. If the state is V(present) and O(value) is set, the key contents
- will be set to the value supplied and C(changed) will be set to V(true) only if the value was different to the current
- contents. If the state is V(present) and O(value) is not set, the existing value associated to the key will be returned.
- The state V(absent) will remove the key/value pair, again C(changed) will be set to V(true) only if the key actually
- existed prior to the removal. An attempt can be made to obtain or free the lock associated with a key/value pair with
- the states V(acquire) or V(release) respectively. A valid session must be supplied to make the attempt C(changed)
- will be V(true) if the attempt is successful, V(false) otherwise.
+ is set to the value supplied and C(changed) is set to V(true) only if the value was different to the current contents.
+ If the state is V(present) and O(value) is not set, the existing value associated to the key is returned. The state
+ V(absent) is used to remove the key/value pair, again C(changed) is set to V(true) only if the key actually existed
+ prior to the removal. An attempt can be made to obtain or free the lock associated with a key/value pair with the
+ states V(acquire) or V(release) respectively. A valid session must be supplied to make the attempt. C(changed) is V(true)
+ if the attempt is successful, V(false) otherwise.
type: str
choices: [absent, acquire, present, release]
default: present
@@ -73,9 +73,8 @@ options:
type: str
cas:
description:
- - Used when acquiring a lock with a session. If the O(cas) is V(0), then Consul will only put the key if it does not
- already exist. If the O(cas) value is non-zero, then the key is only set if the index matches the ModifyIndex of that
- key.
+ - Used when acquiring a lock with a session. If the O(cas) is V(0), then Consul only puts the key if it does not already
+ exist. If the O(cas) value is non-zero, then the key is only set if the index matches the ModifyIndex of that key.
type: str
flags:
description:
@@ -103,8 +102,7 @@ options:
default: true
datacenter:
description:
- - The name of the datacenter to query. If unspecified, the query will default to the datacenter of the Consul agent
- on O(host).
+ - The name of the datacenter to query. If unspecified, the query defaults to the datacenter of the Consul agent on O(host).
type: str
version_added: 10.0.0
"""
@@ -302,7 +300,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
cas=dict(type='str'),
- datacenter=dict(type='str', default=None),
+ datacenter=dict(type='str'),
flags=dict(type='str'),
key=dict(type='str', required=True, no_log=False),
host=dict(type='str', default='localhost'),
diff --git a/plugins/modules/consul_policy.py b/plugins/modules/consul_policy.py
index c9758780b2..e009e44434 100644
--- a/plugins/modules/consul_policy.py
+++ b/plugins/modules/consul_policy.py
@@ -31,7 +31,7 @@ attributes:
support: partial
version_added: 8.3.0
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
action_group:
version_added: 8.3.0
options:
@@ -132,7 +132,7 @@ from ansible_collections.community.general.plugins.module_utils.consul import (
_ARGUMENT_SPEC = {
"name": dict(required=True),
- "description": dict(required=False, type="str"),
+ "description": dict(type="str"),
"rules": dict(type="str"),
"valid_datacenters": dict(type="list", elements="str"),
"state": dict(default="present", choices=["present", "absent"]),
diff --git a/plugins/modules/consul_role.py b/plugins/modules/consul_role.py
index 9ba9856744..4efbef699a 100644
--- a/plugins/modules/consul_role.py
+++ b/plugins/modules/consul_role.py
@@ -29,7 +29,7 @@ attributes:
diff_mode:
support: partial
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
version_added: 8.3.0
action_group:
version_added: 8.3.0
@@ -48,15 +48,15 @@ options:
description:
description:
- Description of the role.
- - If not specified, the assigned description will not be changed.
+ - If not specified, the assigned description is not changed.
type: str
policies:
type: list
elements: dict
description:
- List of policies to attach to the role. Each policy is a dict.
- - If the parameter is left blank, any policies currently assigned will not be changed.
- - Any empty array (V([])) will clear any policies previously set.
+ - If the parameter is left blank, any policies currently assigned are not changed.
+ - Any empty array (V([])) clears any policies previously set.
suboptions:
name:
description:
@@ -90,8 +90,8 @@ options:
elements: dict
description:
- List of service identities to attach to the role.
- - If not specified, any service identities currently assigned will not be changed.
- - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+ - If not specified, any service identities currently assigned are not changed.
+ - If the parameter is an empty array (V([])), any service identities assigned are unassigned.
suboptions:
service_name:
description:
@@ -106,9 +106,9 @@ options:
- name
datacenters:
description:
- - The datacenters the policies will be effective.
- - This will result in effective policy only being valid in this datacenter.
- - If an empty array (V([])) is specified, the policies will valid in all datacenters.
+ - The datacenters where the policies are effective.
+ - This results in effective policy only being valid in this datacenter.
+ - If an empty array (V([])) is specified, the policies are valid in all datacenters.
- Including those which do not yet exist but may in the future.
type: list
elements: str
@@ -117,8 +117,8 @@ options:
elements: dict
description:
- List of node identities to attach to the role.
- - If not specified, any node identities currently assigned will not be changed.
- - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+ - If not specified, any node identities currently assigned are not changed.
+ - If the parameter is an empty array (V([])), any node identities assigned are unassigned.
suboptions:
node_name:
description:
@@ -134,7 +134,7 @@ options:
datacenter:
description:
- The nodes datacenter.
- - This will result in effective policy only being valid in this datacenter.
+ - This results in effective policy only being valid in this datacenter.
type: str
required: true
"""
@@ -182,17 +182,20 @@ role:
returned: success
type: dict
sample:
- {
- "CreateIndex": 39,
- "Description": "",
- "Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=",
- "ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5",
- "ModifyIndex": 39,
- "Name": "foo-role",
- "Policies": [
- {"ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774", "Name": "foo-access"}
- ]
- }
+ {
+ "CreateIndex": 39,
+ "Description": "",
+ "Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=",
+ "ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5",
+ "ModifyIndex": 39,
+ "Name": "foo-role",
+ "Policies": [
+ {
+ "ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774",
+ "Name": "foo-access"
+ }
+ ]
+ }
operation:
description: The operation performed on the role.
returned: changed
diff --git a/plugins/modules/consul_session.py b/plugins/modules/consul_session.py
index a72136ad66..637b09aff2 100644
--- a/plugins/modules/consul_session.py
+++ b/plugins/modules/consul_session.py
@@ -57,7 +57,7 @@ options:
default: 15
node:
description:
- - The name of the node that with which the session will be associated. By default this is the name of the agent.
+ - The name of the node with which the session is associated. By default this is the name of the agent.
type: str
datacenter:
description:
@@ -65,8 +65,8 @@ options:
type: str
checks:
description:
- - Checks that will be used to verify the session health. If all the checks fail, the session will be invalidated and
- any locks associated with the session will be release and can be acquired once the associated lock delay has expired.
+ - Checks that are used to verify the session health. If all the checks fail, the session is invalidated and any locks
+ associated with the session are released and can be acquired once the associated lock delay has expired.
type: list
elements: str
behavior:
diff --git a/plugins/modules/consul_token.py b/plugins/modules/consul_token.py
index b525b2dc2a..1e5aa19f4c 100644
--- a/plugins/modules/consul_token.py
+++ b/plugins/modules/consul_token.py
@@ -29,7 +29,7 @@ attributes:
diff_mode:
support: partial
details:
- - In check mode the diff will miss operational attributes.
+ - In check mode the diff misses operational attributes.
action_group:
version_added: 8.3.0
options:
@@ -41,11 +41,11 @@ options:
type: str
accessor_id:
description:
- - Specifies a UUID to use as the token's Accessor ID. If not specified a UUID will be generated for this field.
+ - Specifies a UUID to use as the token's Accessor ID. If not specified a UUID is generated for this field.
type: str
secret_id:
description:
- - Specifies a UUID to use as the token's Secret ID. If not specified a UUID will be generated for this field.
+ - Specifies a UUID to use as the token's Secret ID. If not specified a UUID is generated for this field.
type: str
description:
description:
@@ -56,8 +56,8 @@ options:
elements: dict
description:
- List of policies to attach to the token. Each policy is a dict.
- - If the parameter is left blank, any policies currently assigned will not be changed.
- - Any empty array (V([])) will clear any policies previously set.
+ - If the parameter is left blank, any policies currently assigned are not changed.
+ - Any empty array (V([])) clears any policies previously set.
suboptions:
name:
description:
@@ -74,8 +74,8 @@ options:
elements: dict
description:
- List of roles to attach to the token. Each role is a dict.
- - If the parameter is left blank, any roles currently assigned will not be changed.
- - Any empty array (V([])) will clear any roles previously set.
+ - If the parameter is left blank, any roles currently assigned are not changed.
+ - Any empty array (V([])) clears any roles previously set.
suboptions:
name:
description:
@@ -108,8 +108,8 @@ options:
elements: dict
description:
- List of service identities to attach to the token.
- - If not specified, any service identities currently assigned will not be changed.
- - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+ - If not specified, any service identities currently assigned are not changed.
+ - If the parameter is an empty array (V([])), any service identities assigned are unassigned.
suboptions:
service_name:
description:
@@ -120,8 +120,8 @@ options:
required: true
datacenters:
description:
- - The datacenters the token will be effective.
- - If an empty array (V([])) is specified, the token will valid in all datacenters.
+ - The datacenters where the token is effective.
+ - If an empty array (V([])) is specified, the token is valid in all datacenters.
- Including those which do not yet exist but may in the future.
type: list
elements: str
@@ -130,8 +130,8 @@ options:
elements: dict
description:
- List of node identities to attach to the token.
- - If not specified, any node identities currently assigned will not be changed.
- - If the parameter is an empty array (V([])), any node identities assigned will be unassigned.
+ - If not specified, any node identities currently assigned are not changed.
+ - If the parameter is an empty array (V([])), any node identities assigned are unassigned.
suboptions:
node_name:
description:
@@ -143,7 +143,7 @@ options:
datacenter:
description:
- The nodes datacenter.
- - This will result in effective token only being valid in this datacenter.
+ - This results in effective token only being valid in this datacenter.
type: str
required: true
local:
@@ -152,7 +152,7 @@ options:
type: bool
expiration_ttl:
description:
- - This is a convenience field and if set will initialize the C(expiration_time). Can be specified in the form of V(60s)
+ - This is a convenience field and if set it initializes the C(expiration_time). Can be specified in the form of V(60s)
or V(5m) (that is, 60 seconds or 5 minutes, respectively). Ingored when the token is updated!
type: str
"""
diff --git a/plugins/modules/copr.py b/plugins/modules/copr.py
index 739092b8af..940fc0eedd 100644
--- a/plugins/modules/copr.py
+++ b/plugins/modules/copr.py
@@ -494,8 +494,8 @@ def run_module():
name=dict(type="str", required=True),
state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"),
chroot=dict(type="str"),
- includepkgs=dict(type='list', elements="str", required=False),
- excludepkgs=dict(type='list', elements="str", required=False),
+ includepkgs=dict(type='list', elements="str"),
+ excludepkgs=dict(type='list', elements="str"),
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
params = module.params
diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py
index 356cbbb215..482183c0e0 100644
--- a/plugins/modules/cpanm.py
+++ b/plugins/modules/cpanm.py
@@ -59,16 +59,18 @@ options:
install_recommendations:
description:
- If V(true), installs dependencies declared as recommends per META spec.
- - If V(false), it ensures the dependencies declared as recommends are not installed, overriding any decision made earlier in E(PERL_CPANM_OPT).
- - If parameter is not set, C(cpanm) will use its existing defaults.
+ - If V(false), it ensures the dependencies declared as recommends are not installed, overriding any decision made earlier
+ in E(PERL_CPANM_OPT).
+ - If parameter is not set, C(cpanm) uses its existing defaults.
- When these dependencies fail to install, cpanm continues the installation, since they are just recommendation.
type: bool
version_added: 10.3.0
install_suggestions:
description:
- If V(true), installs dependencies declared as suggests per META spec.
- - If V(false), it ensures the dependencies declared as suggests are not installed, overriding any decision made earlier in E(PERL_CPANM_OPT).
- - If parameter is not set, C(cpanm) will use its existing defaults.
+ - If V(false), it ensures the dependencies declared as suggests are not installed, overriding any decision made earlier
+ in E(PERL_CPANM_OPT).
+ - If parameter is not set, C(cpanm) uses its existing defaults.
- When these dependencies fail to install, cpanm continues the installation, since they are just suggestion.
type: bool
version_added: 10.3.0
@@ -97,14 +99,13 @@ options:
notes:
- Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host.
- 'This module now comes with a choice of execution O(mode): V(compatibility) or V(new).'
- - 'O(mode=compatibility): When using V(compatibility) mode, the module will keep backward compatibility. This was the default
+ - 'O(mode=compatibility): When using V(compatibility) mode, the module keeps backward compatibility. This was the default
mode before community.general 9.0.0. O(name) must be either a module name or a distribution file. If the perl module given
- by O(name) is installed (at the exact O(version) when specified), then nothing happens. Otherwise, it will be installed
- using the C(cpanm) executable. O(name) cannot be an URL, or a git URL. C(cpanm) version specifiers do not work in this
- mode.'
- - 'O(mode=new): When using V(new) mode, the module will behave differently. The O(name) parameter may refer to a module
- name, a distribution file, a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version
- specifiers are recognized. This is the default mode from community.general 9.0.0 onwards.'
+ by O(name) is installed (at the exact O(version) when specified), then nothing happens. Otherwise, it is installed using
+ the C(cpanm) executable. O(name) cannot be an URL, or a git URL. C(cpanm) version specifiers do not work in this mode.'
+ - 'O(mode=new): When using V(new) mode, the module behaves differently. The O(name) parameter may refer to a module name,
+ a distribution file, a HTTP URL or a git repository URL as described in C(cpanminus) documentation. C(cpanm) version specifiers
+ are recognized. This is the default mode from community.general 9.0.0 onwards.'
seealso:
- name: C(cpanm) command manual page
description: Manual page for the command.
@@ -204,7 +205,6 @@ class CPANMinus(ModuleHelper):
pkg_spec=cmd_runner_fmt.as_list(),
cpanm_version=cmd_runner_fmt.as_fixed("--version"),
)
- use_old_vardict = False
def __init_module__(self):
v = self.vars
diff --git a/plugins/modules/cronvar.py b/plugins/modules/cronvar.py
index 4f00aef07c..5f7d02bfc3 100644
--- a/plugins/modules/cronvar.py
+++ b/plugins/modules/cronvar.py
@@ -43,12 +43,12 @@ options:
type: str
insertafter:
description:
- - If specified, the variable will be inserted after the variable specified.
+ - If specified, the variable is inserted after the variable specified.
- Used with O(state=present).
type: str
insertbefore:
description:
- - Used with O(state=present). If specified, the variable will be inserted just before the variable specified.
+ - Used with O(state=present). If specified, the variable is inserted just before the variable specified.
type: str
state:
description:
@@ -135,6 +135,9 @@ class CronVar(object):
self.cron_file = cron_file
else:
self.cron_file = os.path.join('/etc/cron.d', cron_file)
+ parent_dir = os.path.dirname(self.cron_file)
+ if parent_dir and not os.path.isdir(parent_dir):
+ module.fail_json(msg="Parent directory '{}' does not exist for cron_file: '{}'".format(parent_dir, cron_file))
else:
self.cron_file = None
@@ -393,6 +396,8 @@ def main():
old_value = cronvar.find_variable(name)
if ensure_present:
+ if value == "" and old_value != "":
+ value = '""'
if old_value is None:
cronvar.add_variable(name, value, insertbefore, insertafter)
changed = True
diff --git a/plugins/modules/crypttab.py b/plugins/modules/crypttab.py
index f728e39ade..5749d75cec 100644
--- a/plugins/modules/crypttab.py
+++ b/plugins/modules/crypttab.py
@@ -24,14 +24,14 @@ options:
name:
description:
- Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or optionally prefixed with V(/dev/mapper/),
- as it appears in the filesystem. V(/dev/mapper/) will be stripped from O(name).
+ as it appears in the filesystem. V(/dev/mapper/) is stripped from O(name).
type: str
required: true
state:
description:
- Use V(present) to add a line to C(/etc/crypttab) or update its definition if already present.
- Use V(absent) to remove a line with matching O(name).
- - Use V(opts_present) to add options to those already present; options with different values will be updated.
+ - Use V(opts_present) to add options to those already present; options with different values are updated.
- Use V(opts_absent) to remove options from the existing set.
type: str
required: true
@@ -72,7 +72,15 @@ EXAMPLES = r"""
state: opts_present
opts: discard
loop: '{{ ansible_mounts }}'
- when: "'/dev/mapper/luks-' in {{ item.device }}"
+ when: "'/dev/mapper/luks-' in item.device"
+
+- name: Add entry to /etc/crypttab for luks-home with password file
+ community.general.crypttab:
+ name: luks-home
+ backing_device: UUID=123e4567-e89b-12d3-a456-426614174000
+ password: /root/keys/luks-home.key
+ opts: discard,cipher=aes-cbc-essiv:sha256
+ state: present
"""
import os
@@ -116,7 +124,7 @@ def main():
('backing_device', backing_device),
('password', password),
('opts', opts)):
- if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
+ if arg is not None and (' ' in arg or '\t' in arg or arg == ''):
module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
**module.params)
diff --git a/plugins/modules/datadog_downtime.py b/plugins/modules/datadog_downtime.py
index f693ba3c2d..9e48410014 100644
--- a/plugins/modules/datadog_downtime.py
+++ b/plugins/modules/datadog_downtime.py
@@ -126,30 +126,31 @@ RETURN = r"""
# Returns the downtime JSON dictionary from the API response under the C(downtime) key.
# See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details.
downtime:
- description: The downtime returned by the API.
- type: dict
- returned: always
- sample: {
- "active": true,
- "canceled": null,
- "creator_id": 1445416,
- "disabled": false,
- "downtime_type": 2,
- "end": null,
- "id": 1055751000,
- "message": "Downtime for foo:bar",
- "monitor_id": null,
- "monitor_tags": [
- "foo:bar"
- ],
- "parent_id": null,
- "recurrence": null,
- "scope": [
- "test"
- ],
- "start": 1607015009,
- "timezone": "UTC",
- "updater_id": null
+ description: The downtime returned by the API.
+ type: dict
+ returned: always
+ sample:
+ {
+ "active": true,
+ "canceled": null,
+ "creator_id": 1445416,
+ "disabled": false,
+ "downtime_type": 2,
+ "end": null,
+ "id": 1055751000,
+ "message": "Downtime for foo:bar",
+ "monitor_id": null,
+ "monitor_tags": [
+ "foo:bar"
+ ],
+ "parent_id": null,
+ "recurrence": null,
+ "scope": [
+ "test"
+ ],
+ "start": 1607015009,
+ "timezone": "UTC",
+ "updater_id": null
}
"""
@@ -174,18 +175,18 @@ def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, no_log=True),
- api_host=dict(required=False, default="https://api.datadoghq.com"),
+ api_host=dict(default="https://api.datadoghq.com"),
app_key=dict(required=True, no_log=True),
- state=dict(required=False, choices=["present", "absent"], default="present"),
- monitor_tags=dict(required=False, type="list", elements="str"),
- scope=dict(required=False, type="list", elements="str"),
- monitor_id=dict(required=False, type="int"),
- downtime_message=dict(required=False, no_log=True),
- start=dict(required=False, type="int"),
- end=dict(required=False, type="int"),
- timezone=dict(required=False, type="str"),
- rrule=dict(required=False, type="str"),
- id=dict(required=False, type="int"),
+ state=dict(choices=["present", "absent"], default="present"),
+ monitor_tags=dict(type="list", elements="str"),
+ scope=dict(type="list", elements="str"),
+ monitor_id=dict(type="int"),
+ downtime_message=dict(no_log=True),
+ start=dict(type="int"),
+ end=dict(type="int"),
+ timezone=dict(type="str"),
+ rrule=dict(type="str"),
+ id=dict(type="int"),
)
)
diff --git a/plugins/modules/datadog_event.py b/plugins/modules/datadog_event.py
index 97be0c9b16..fd75ea81de 100644
--- a/plugins/modules/datadog_event.py
+++ b/plugins/modules/datadog_event.py
@@ -16,7 +16,7 @@ __metaclass__ = type
DOCUMENTATION = r"""
module: datadog_event
-short_description: Posts events to Datadog service
+short_description: Posts events to Datadog service
description:
- Allows to post events to Datadog (www.datadoghq.com) service.
- Uses http://docs.datadoghq.com/api/#events API.
@@ -89,8 +89,8 @@ options:
- An arbitrary string to use for aggregation.
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
"""
diff --git a/plugins/modules/datadog_monitor.py b/plugins/modules/datadog_monitor.py
index eec0db0d32..f778d2444d 100644
--- a/plugins/modules/datadog_monitor.py
+++ b/plugins/modules/datadog_monitor.py
@@ -92,26 +92,26 @@ options:
type: dict
description:
- Dictionary of scopes to silence, with timestamps or None.
- - Each scope will be muted until the given POSIX timestamp or forever if the value is None.
+ - Each scope is muted until the given POSIX timestamp or forever if the value is V(None).
notify_no_data:
description:
- - Whether this monitor will notify when data stops reporting.
+ - Whether this monitor notifies when data stops reporting.
type: bool
default: false
no_data_timeframe:
description:
- - The number of minutes before a monitor will notify when data stops reporting.
+ - The number of minutes before a monitor notifies when data stops reporting.
- Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
- If not specified, it defaults to 2x timeframe for metric, 2 minutes for service.
type: str
timeout_h:
description:
- - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
+ - The number of hours of the monitor not reporting data before it automatically resolves from a triggered state.
type: str
renotify_interval:
description:
- - The number of minutes after the last notification before a monitor will re-notify on the current status.
- - It will only re-notify if it is not resolved.
+ - The number of minutes after the last notification before a monitor re-notifies on the current status.
+ - It only re-notifies if it is not resolved.
type: str
escalation_message:
description:
@@ -120,7 +120,7 @@ options:
type: str
notify_audit:
description:
- - Whether tagged users will be notified on changes to this monitor.
+ - Whether tagged users are notified on changes to this monitor.
type: bool
default: false
thresholds:
@@ -138,7 +138,7 @@ options:
require_full_window:
description:
- Whether this monitor needs a full window of data before it gets evaluated.
- - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped.
+ - We highly recommend you set this to V(false) for sparse metrics, otherwise some evaluations are skipped.
type: bool
new_host_delay:
description:
@@ -153,7 +153,7 @@ options:
id:
description:
- The ID of the alert.
- - If set, will be used instead of the name to locate the alert.
+ - If set, it is used instead of O(name) to locate the alert.
type: str
include_tags:
description:
@@ -275,14 +275,14 @@ def main():
renotify_interval=dict(),
escalation_message=dict(),
notify_audit=dict(default=False, type='bool'),
- thresholds=dict(type='dict', default=None),
- tags=dict(type='list', elements='str', default=None),
+ thresholds=dict(type='dict'),
+ tags=dict(type='list', elements='str'),
locked=dict(default=False, type='bool'),
require_full_window=dict(type='bool'),
new_host_delay=dict(),
evaluation_delay=dict(),
id=dict(),
- include_tags=dict(required=False, default=True, type='bool'),
+ include_tags=dict(default=True, type='bool'),
priority=dict(type='int'),
notification_preset_name=dict(choices=['show_all', 'hide_query', 'hide_handles', 'hide_all']),
renotify_occurrences=dict(type='int'),
@@ -435,7 +435,7 @@ def mute_monitor(module):
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif monitor['options']['silenced']:
module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
- elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
+ elif module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0:
module.exit_json(changed=False)
try:
if module.params['silenced'] is None or module.params['silenced'] == "":
diff --git a/plugins/modules/dconf.py b/plugins/modules/dconf.py
index 319d6770f2..762c443130 100644
--- a/plugins/modules/dconf.py
+++ b/plugins/modules/dconf.py
@@ -17,10 +17,10 @@ short_description: Modify and read dconf database
description:
- This module allows modifications and reading of C(dconf) database. The module is implemented as a wrapper around C(dconf)
tool. Please see the dconf(1) man page for more details.
- - Since C(dconf) requires a running D-Bus session to change values, the module will try to detect an existing session and
- reuse it, or run the tool using C(dbus-run-session).
+ - Since C(dconf) requires a running D-Bus session to change values, the module tries to detect an existing session and reuse
+ it, or run the tool using C(dbus-run-session).
requirements:
- - Optionally the C(gi.repository) Python library (usually included in the OS on hosts which have C(dconf)); this will become
+ - Optionally the C(gi.repository) Python library (usually included in the OS on hosts which have C(dconf)); this is to become
a non-optional requirement in a future major release of community.general.
notes:
- This module depends on C(psutil) Python library (version 4.0.0 and upwards), C(dconf), C(dbus-send), and C(dbus-run-session)
@@ -28,7 +28,7 @@ notes:
- This module uses the C(gi.repository) Python library when available for accurate comparison of values in C(dconf) to values
specified in Ansible code. C(gi.repository) is likely to be present on most systems which have C(dconf) but may not be
present everywhere. When it is missing, a simple string comparison between values is used, and there may be false positives,
- that is, Ansible may think that a value is being changed when it is not. This fallback will be removed in a future version
+ that is, Ansible may think that a value is being changed when it is not. This fallback is to be removed in a future version
of this module, at which point the module will stop working on hosts without C(gi.repository).
- Detection of existing, running D-Bus session, required to change settings using C(dconf), is not 100% reliable due to
implementation details of D-Bus daemon itself. This might lead to running applications not picking-up changes on-the-fly
@@ -398,7 +398,7 @@ def main():
state=dict(default='present', choices=['present', 'absent', 'read']),
key=dict(required=True, type='str', no_log=False),
# Converted to str below after special handling of bool.
- value=dict(required=False, default=None, type='raw'),
+ value=dict(type='raw'),
),
supports_check_mode=True,
required_if=[
diff --git a/plugins/modules/decompress.py b/plugins/modules/decompress.py
index aa7a14aefb..03be61a8e6 100644
--- a/plugins/modules/decompress.py
+++ b/plugins/modules/decompress.py
@@ -33,13 +33,12 @@ options:
required: true
dest:
description:
- - The file name of the destination file where the compressed file will be decompressed.
- - If the destination file exists, it will be truncated and overwritten.
- - If not specified, the destination filename will be derived from O(src) by removing the compression format extension.
- For example, if O(src) is V(/path/to/file.txt.gz) and O(format) is V(gz), O(dest) will be V(/path/to/file.txt). If
- the O(src) file does not have an extension for the current O(format), the O(dest) filename will be made by appending
- C(_decompressed) to the O(src) filename. For instance, if O(src) is V(/path/to/file.myextension), the (dest) filename
- will be V(/path/to/file.myextension_decompressed).
+ - The file name of the destination file where the compressed file is decompressed.
+ - If the destination file exists, it is truncated and overwritten.
+ - If not specified, the destination filename is derived from O(src) by removing the compression format extension. For
+ example, when O(src) is V(/path/to/file.txt.gz) and O(format) is V(gz), O(dest) is V(/path/to/file.txt). If the O(src)
+ file does not have an extension for the current O(format), the O(dest) filename is made by appending C(_decompressed)
+ to the O(src) filename. For instance, when O(src) is V(/path/to/file.myextension), the O(dest) filename is V(/path/to/file.myextension_decompressed).
type: path
format:
description:
@@ -132,7 +131,6 @@ def decompress(b_src, b_dest, handler):
class Decompress(ModuleHelper):
destination_filename_template = "%s_decompressed"
- use_old_vardict = False
output_params = 'dest'
module = dict(
diff --git a/plugins/modules/deploy_helper.py b/plugins/modules/deploy_helper.py
index 14a7d4f8c7..b25e68392b 100644
--- a/plugins/modules/deploy_helper.py
+++ b/plugins/modules/deploy_helper.py
@@ -18,8 +18,8 @@ short_description: Manages some of the steps common in deploying projects
description:
- The Deploy Helper manages some of the steps common in deploying software. It creates a folder structure, manages a symlink
for the current release and cleans up old releases.
- - Running it with the O(state=query) or O(state=present) will return the C(deploy_helper) fact. C(project_path), whatever
- you set in the O(path) parameter, C(current_path), the path to the symlink that points to the active release, C(releases_path),
+ - Running it with the O(state=query) or O(state=present) returns the C(deploy_helper) fact. C(project_path), whatever you
+ set in the O(path) parameter, C(current_path), the path to the symlink that points to the active release, C(releases_path),
the path to the folder to keep releases in, C(shared_path), the path to the folder to keep shared resources in, C(unfinished_filename),
the file to check for to recognize unfinished builds, C(previous_release), the release the 'current' symlink is pointing
to, C(previous_release_path), the full path to the 'current' symlink target, C(new_release), either the O(release) parameter
@@ -41,12 +41,12 @@ options:
type: str
description:
- The state of the project.
- - V(query) will only gather facts.
- - V(present) will create the project C(root) folder, and in it the C(releases) and C(shared) folders.
- - V(finalize) will remove the unfinished_filename file, create a symlink to the newly deployed release and optionally
- clean old releases.
- - V(clean) will remove failed & old releases.
- - V(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with O(state=absent)).
+ - V(query) gathers facts.
+ - V(present) creates the project C(root) folder, and in it the C(releases) and C(shared) folders.
+ - V(finalize) removes the unfinished_filename file, creates a symlink to the newly deployed release and optionally cleans
+ old releases.
+ - V(clean) removes failed & old releases.
+ - V(absent) removes the project folder (synonymous to the M(ansible.builtin.file) module with O(state=absent)).
choices: [present, finalize, absent, clean, query]
default: present
@@ -59,15 +59,15 @@ options:
releases_path:
type: str
description:
- - The name of the folder that will hold the releases. This can be relative to O(path) or absolute. Returned in the C(deploy_helper.releases_path)
+ - The name of the folder that holds the releases. This can be relative to O(path) or absolute. Returned in the C(deploy_helper.releases_path)
fact.
default: releases
shared_path:
type: path
description:
- - The name of the folder that will hold the shared resources. This can be relative to O(path) or absolute. If this is
- set to an empty string, no shared folder will be created. Returned in the C(deploy_helper.shared_path) fact.
+ - The name of the folder that holds the shared resources. This can be relative to O(path) or absolute. If this is set
+ to an empty string, no shared folder is created. Returned in the C(deploy_helper.shared_path) fact.
default: shared
current_path:
@@ -81,8 +81,8 @@ options:
type: str
description:
- The name of the file that indicates a deploy has not finished. All folders in the O(releases_path) that contain this
- file will be deleted on O(state=finalize) with O(clean=true), or O(state=clean). This file is automatically deleted
- from the C(new_release_path) during O(state=finalize).
+ file are deleted on O(state=finalize) with O(clean=true), or O(state=clean). This file is automatically deleted from
+ the C(new_release_path) during O(state=finalize).
default: DEPLOY_UNFINISHED
clean:
@@ -95,16 +95,16 @@ options:
type: int
description:
- The number of old releases to keep when cleaning. Used in O(state=finalize) and O(state=clean). Any unfinished builds
- will be deleted first, so only correct releases will count. The current version will not count.
+ are deleted first, so only correct releases count. The current version does not count.
default: 5
notes:
- Facts are only returned for O(state=query) and O(state=present). If you use both, you should pass any overridden parameters
- to both calls, otherwise the second call will overwrite the facts of the first one.
+ to both calls, otherwise the second call overwrites the facts of the first one.
- When using O(state=clean), the releases are ordered by I(creation date). You should be able to switch to a new naming
strategy without problems.
- - Because of the default behaviour of generating the C(new_release) fact, this module will not be idempotent unless you
- pass your own release name with O(release). Due to the nature of deploying software, this should not be much of a problem.
+ - Because of the default behaviour of generating the C(new_release) fact, this module is not idempotent unless you pass
+ your own release name with O(release). Due to the nature of deploying software, this should not be much of a problem.
extends_documentation_fragment:
- ansible.builtin.files
- community.general.attributes
diff --git a/plugins/modules/dimensiondata_network.py b/plugins/modules/dimensiondata_network.py
index 6617d6aef1..04fff21e58 100644
--- a/plugins/modules/dimensiondata_network.py
+++ b/plugins/modules/dimensiondata_network.py
@@ -140,7 +140,7 @@ class DimensionDataNetworkModule(DimensionDataModule):
module=AnsibleModule(
argument_spec=DimensionDataModule.argument_spec_with_wait(
name=dict(type='str', required=True),
- description=dict(type='str', required=False),
+ description=dict(type='str'),
service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']),
state=dict(default='present', choices=['present', 'absent'])
),
diff --git a/plugins/modules/dimensiondata_vlan.py b/plugins/modules/dimensiondata_vlan.py
index 2389d34333..b28b12d998 100644
--- a/plugins/modules/dimensiondata_vlan.py
+++ b/plugins/modules/dimensiondata_vlan.py
@@ -56,8 +56,7 @@ options:
state:
description:
- The desired state for the target VLAN.
- - V(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not
- exist).
+ - V(readonly) ensures that the state is only ever read, not modified (the module fails if the resource does not exist).
choices: [present, absent, readonly]
default: present
type: str
@@ -65,7 +64,7 @@ options:
description:
- Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently
possesses.
- - If V(false), the module will fail under these conditions.
+ - If V(false), the module fails under these conditions.
- This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
type: bool
default: false
@@ -187,7 +186,7 @@ class DimensionDataVlanModule(DimensionDataModule):
network_domain=dict(required=True, type='str'),
private_ipv4_base_address=dict(default='', type='str'),
private_ipv4_prefix_size=dict(default=0, type='int'),
- allow_expand=dict(required=False, default=False, type='bool'),
+ allow_expand=dict(default=False, type='bool'),
state=dict(default='present', choices=['present', 'absent', 'readonly'])
),
required_together=DimensionDataModule.required_together()
diff --git a/plugins/modules/django_check.py b/plugins/modules/django_check.py
index 9699428b9c..e6e03c8276 100644
--- a/plugins/modules/django_check.py
+++ b/plugins/modules/django_check.py
@@ -22,7 +22,7 @@ options:
database:
description:
- Specify databases to run checks against.
- - If not specified, Django will not run database tests.
+ - If not specified, Django does not run database tests.
type: list
elements: str
deploy:
@@ -32,7 +32,7 @@ options:
default: false
fail_level:
description:
- - Message level that will trigger failure.
+ - Message level that triggers failure.
- Default is the Django default value. Check the documentation for the version being used.
type: str
choices: [CRITICAL, ERROR, WARNING, INFO, DEBUG]
@@ -49,7 +49,7 @@ options:
elements: str
notes:
- The outcome of the module is found in the common return values RV(ignore:stdout), RV(ignore:stderr), RV(ignore:rc).
- - The module will fail if RV(ignore:rc) is not zero.
+ - The module fails if RV(ignore:rc) is not zero.
attributes:
check_mode:
support: full
diff --git a/plugins/modules/django_manage.py b/plugins/modules/django_manage.py
index dab544a29d..0fe07890f8 100644
--- a/plugins/modules/django_manage.py
+++ b/plugins/modules/django_manage.py
@@ -15,7 +15,7 @@ module: django_manage
short_description: Manages a Django application
description:
- Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the O(virtualenv) parameter,
- all management commands will be executed by the given C(virtualenv) installation.
+ all management commands are executed by the given C(virtualenv) installation.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -34,8 +34,8 @@ options:
- V(loaddata) - Searches for and loads the contents of the named O(fixtures) into the database.
- V(migrate) - Synchronizes the database state with models and migrations.
- V(test) - Runs tests for all installed apps.
- - Other commands can be entered, but will fail if they are unknown to Django. Other commands that may prompt for user
- input should be run with the C(--noinput) flag.
+ - Custom commands can be entered, but they fail unless they are known to Django. Custom commands that may prompt for
+ user input should be run with the C(--noinput) flag.
- Support for the values V(cleanup), V(syncdb), V(validate) was removed in community.general 9.0.0. See note about supported
versions of Django.
type: str
@@ -62,7 +62,7 @@ options:
virtualenv:
description:
- An optional path to a C(virtualenv) installation to use while running the manage application.
- - The virtual environment must exist, otherwise the module will fail.
+ - The virtual environment must exist, otherwise the module fails.
type: path
aliases: [virtual_env]
apps:
@@ -78,7 +78,7 @@ options:
clear:
description:
- Clear the existing files before trying to copy or link the original file.
- - Used only with the V(collectstatic) command. The C(--noinput) argument will be added automatically.
+ - Used only with the V(collectstatic) command. The C(--noinput) argument is added automatically.
required: false
default: false
type: bool
@@ -101,18 +101,18 @@ options:
required: false
skip:
description:
- - Will skip over out-of-order missing migrations, you can only use this parameter with V(migrate) command.
+ - Skips over out-of-order missing migrations, you can only use this parameter with V(migrate) command.
required: false
type: bool
merge:
description:
- - Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with
+ - Runs out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with
V(migrate) command.
required: false
type: bool
link:
description:
- - Will create links to the files instead of copying them, you can only use this parameter with V(collectstatic) command.
+ - Creates links to the files instead of copying them, you can only use this parameter with V(collectstatic) command.
required: false
type: bool
testrunner:
@@ -122,19 +122,13 @@ options:
type: str
required: false
aliases: [test_runner]
- ack_venv_creation_deprecation:
- description:
- - This option no longer has any effect since community.general 9.0.0.
- - It will be removed from community.general 11.0.0.
- type: bool
- version_added: 5.8.0
notes:
- 'B(ATTENTION): Support for Django releases older than 4.1 has been removed in community.general version 9.0.0. While the
module allows for free-form commands, not verifying the version of Django being used, it is B(strongly recommended) to
use a more recent version of the framework.'
- Please notice that Django 4.1 requires Python 3.8 or greater.
- - This module will not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment does not
+ - This module does not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment does not
already exist at the given location. This behavior changed in community.general version 9.0.0.
- The recommended way to create a virtual environment in Ansible is by using M(ansible.builtin.pip).
- This module assumes English error messages for the V(createcachetable) command to detect table existence, unfortunately.
@@ -291,7 +285,6 @@ def main():
skip=dict(type='bool'),
merge=dict(type='bool'),
link=dict(type='bool'),
- ack_venv_creation_deprecation=dict(type='bool', removed_in_version='11.0.0', removed_from_collection='community.general'),
),
)
diff --git a/plugins/modules/dnf_config_manager.py b/plugins/modules/dnf_config_manager.py
index 2d896f3742..eb64bee864 100644
--- a/plugins/modules/dnf_config_manager.py
+++ b/plugins/modules/dnf_config_manager.py
@@ -39,6 +39,8 @@ options:
required: false
type: str
choices: [enabled, disabled]
+notes:
+ - Does not work with C(dnf5).
seealso:
- module: ansible.builtin.dnf
- module: ansible.builtin.yum_repository
@@ -118,7 +120,7 @@ changed_repos:
returned: success
type: list
elements: str
- sample: ['crb']
+ sample: ["crb"]
"""
from ansible.module_utils.basic import AnsibleModule
@@ -173,8 +175,8 @@ def pack_repo_states_for_return(states):
def main():
module_args = dict(
- name=dict(type='list', elements='str', required=False, default=[]),
- state=dict(type='str', required=False, choices=['enabled', 'disabled'], default='enabled')
+ name=dict(type='list', elements='str', default=[]),
+ state=dict(type='str', choices=['enabled', 'disabled'], default='enabled')
)
result = dict(
diff --git a/plugins/modules/dnf_versionlock.py b/plugins/modules/dnf_versionlock.py
index eec7f3013a..b3e2e2bcc9 100644
--- a/plugins/modules/dnf_versionlock.py
+++ b/plugins/modules/dnf_versionlock.py
@@ -22,9 +22,9 @@ attributes:
support: partial
details:
- The logics of the C(versionlock) plugin for corner cases could be confusing, so please take in account that this module
- will do its best to give a C(check_mode) prediction on what is going to happen. In case of doubt, check the documentation
+ does its best to give a C(check_mode) prediction on what is going to happen. In case of doubt, check the documentation
of the plugin.
- - Sometimes the module could predict changes in C(check_mode) that will not be such because C(versionlock) concludes
+ - Sometimes the module could predict changes in C(check_mode) that are not fulfilled because C(versionlock) concludes
that there is already a entry in C(locklist) that already matches.
diff_mode:
support: none
@@ -47,12 +47,12 @@ options:
state:
description:
- Whether to add (V(present) or V(excluded)) to or remove (V(absent) or V(clean)) from the C(locklist).
- - V(present) will add a package name spec to the C(locklist). If there is a installed package that matches, then only
- that version will be added. Otherwise, all available package versions will be added.
- - V(excluded) will add a package name spec as excluded to the C(locklist). It means that packages represented by the
- package name spec will be excluded from transaction operations. All available package versions will be added.
- - V(absent) will delete entries in the C(locklist) that match the package name spec.
- - V(clean) will delete all entries in the C(locklist). This option is mutually exclusive with O(name).
+ - V(present) adds a package name spec to the C(locklist). If there is an installed package that matches, then only that
+ version is added. Otherwise, all available package versions are added.
+ - V(excluded) adds a package name spec as excluded to the C(locklist). It means that packages represented by the package
+ name spec are excluded from transaction operations. All available package versions are added.
+ - V(absent) deletes entries in the C(locklist) that match the package name spec.
+ - V(clean) deletes all entries in the C(locklist). This option is mutually exclusive with O(name).
choices: ['absent', 'clean', 'excluded', 'present']
type: str
default: present
@@ -60,6 +60,7 @@ notes:
- In an ideal world, the C(versionlock) plugin would have a dry-run option to know for sure what is going to happen. So
far we have to work with a best guess as close as possible to the behaviour inferred from its code.
- For most of cases where you want to lock and unlock specific versions of a package, this works fairly well.
+ - Does not work with C(dnf5).
requirements:
- dnf
- dnf-plugin-versionlock
@@ -82,12 +83,12 @@ EXAMPLES = r"""
- name: Remove lock from nginx to be updated again
community.general.dnf_versionlock:
- package: nginx
+ name: nginx
state: absent
- name: Exclude bind 32:9.11 from installs or updates
community.general.dnf_versionlock:
- package: bind-32:9.11*
+ name: bind-32:9.11*
state: excluded
- name: Keep bash package in major version 4
@@ -107,25 +108,25 @@ locklist_pre:
returned: success
type: list
elements: str
- sample: ['bash-0:4.4.20-1.el8_4.*', '!bind-32:9.11.26-4.el8_4.*']
+ sample: ["bash-0:4.4.20-1.el8_4.*", "!bind-32:9.11.26-4.el8_4.*"]
locklist_post:
description: Locklist after module execution.
returned: success and (not check mode or state is clean)
type: list
elements: str
- sample: ['bash-0:4.4.20-1.el8_4.*']
+ sample: ["bash-0:4.4.20-1.el8_4.*"]
specs_toadd:
description: Package name specs meant to be added by versionlock.
returned: success
type: list
elements: str
- sample: ['bash']
+ sample: ["bash"]
specs_todelete:
description: Package name specs meant to be deleted by versionlock.
returned: success
type: list
elements: str
- sample: ['bind']
+ sample: ["bind"]
"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/dnsimple.py b/plugins/modules/dnsimple.py
index ffa856f137..729c876841 100644
--- a/plugins/modules/dnsimple.py
+++ b/plugins/modules/dnsimple.py
@@ -25,8 +25,8 @@ attributes:
options:
account_email:
description:
- - Account email. If omitted, the environment variables E(DNSIMPLE_EMAIL) and E(DNSIMPLE_API_TOKEN) will be looked for.
- - 'If those variables are not found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started).'
+ - Account email. If omitted, the environment variables E(DNSIMPLE_EMAIL) and E(DNSIMPLE_API_TOKEN) are looked for.
+ - 'If those variables are not found, a C(.dnsimple) file is looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started).'
- C(.dnsimple) config files are only supported in dnsimple-python<2.0.0.
type: str
account_api_token:
@@ -36,12 +36,12 @@ options:
domain:
description:
- Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNSimple.
- - If omitted, a list of domains will be returned.
- - If domain is present but the domain does not exist, it will be created.
+ - If omitted, a list of domains is returned.
+ - If domain is present but the domain does not exist, it is created.
type: str
record:
description:
- - Record to add, if blank a record for the domain will be created, supports the wildcard (*).
+ - Record to add, if blank a record for the domain is created, supports the wildcard (*).
type: str
record_ids:
description:
@@ -51,8 +51,23 @@ options:
type:
description:
- The type of DNS record to create.
- choices: ['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL',
- 'CAA']
+ choices:
+ - A
+ - ALIAS
+ - CNAME
+ - MX
+ - SPF
+ - URL
+ - TXT
+ - NS
+ - SRV
+ - NAPTR
+ - PTR
+ - AAAA
+ - SSHFP
+ - HINFO
+ - POOL
+ - CAA
type: str
ttl:
description:
@@ -151,7 +166,7 @@ EXAMPLES = r"""
delegate_to: localhost
"""
-RETURN = r"""# """
+RETURN = r"""#"""
import traceback
import re
diff --git a/plugins/modules/dnsimple_info.py b/plugins/modules/dnsimple_info.py
index c508525fac..78b4ceae25 100644
--- a/plugins/modules/dnsimple_info.py
+++ b/plugins/modules/dnsimple_info.py
@@ -26,8 +26,8 @@ options:
name:
description:
- The domain name to retrieve info from.
- - Will return all associated records for this domain if specified.
- - If not specified, will return all domains associated with the account ID.
+ - Returns all associated records for this domain if specified.
+ - If not specified, returns all domains associated with the account ID.
type: str
account_id:
@@ -43,7 +43,7 @@ options:
record:
description:
- The record to find.
- - If specified, only this record will be returned instead of all records.
+ - If specified, only this record is returned instead of all records.
required: false
type: str
diff --git a/plugins/modules/dnsmadeeasy.py b/plugins/modules/dnsmadeeasy.py
index 83268af379..ec17880af7 100644
--- a/plugins/modules/dnsmadeeasy.py
+++ b/plugins/modules/dnsmadeeasy.py
@@ -50,7 +50,7 @@ options:
record_name:
description:
- - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned
+ - Record name to get/create/delete/update. If O(record_name) is not specified, all records for the domain are returned
in "result" regardless of the state argument.
type: str
@@ -64,8 +64,8 @@ options:
description:
- 'Record value. HTTPRED: , MX: , NS: , PTR: , SRV:
, TXT: ".'
- - If record_value is not specified; no changes will be made and the record will be returned in 'result' (in other words,
- this module can be used to fetch a record's current ID, type, and ttl).
+ - If O(record_value) is not specified, no changes are made and the record is returned in RV(ignore:result) (in other
+ words, this module can be used to fetch a record's current ID, type, and TTL).
type: str
record_ttl:
@@ -83,8 +83,8 @@ options:
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
@@ -128,7 +128,7 @@ options:
contactList:
description:
- - Name or ID of the contact list that the monitor will notify.
+ - Name or ID of the contact list that the monitor notifies.
- The default V('') means the Account Owner.
type: str
@@ -195,7 +195,7 @@ notes:
- Only A records can have a O(monitor) or O(failover).
- To add failover, the O(failover), O(autoFailover), O(port), O(protocol), O(ip1), and O(ip2) options are required.
- To add monitor, the O(monitor), O(port), O(protocol), O(maxEmails), O(systemDescription), and O(ip1) options are required.
- - The monitor and the failover will share O(port), O(protocol), and O(ip1) options.
+ - The options O(monitor) and O(failover) share O(port), O(protocol), and O(ip1) options.
requirements: [hashlib, hmac]
author: "Brice Burgess (@briceburg)"
"""
@@ -553,28 +553,28 @@ def main():
domain=dict(required=True),
sandbox=dict(default=False, type='bool'),
state=dict(required=True, choices=['present', 'absent']),
- record_name=dict(required=False),
- record_type=dict(required=False, choices=[
+ record_name=dict(),
+ record_type=dict(choices=[
'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
- record_value=dict(required=False),
- record_ttl=dict(required=False, default=1800, type='int'),
+ record_value=dict(),
+ record_ttl=dict(default=1800, type='int'),
monitor=dict(default=False, type='bool'),
systemDescription=dict(default=''),
maxEmails=dict(default=1, type='int'),
protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
port=dict(default=80, type='int'),
sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
- contactList=dict(default=None),
- httpFqdn=dict(required=False),
- httpFile=dict(required=False),
- httpQueryString=dict(required=False),
+ contactList=dict(),
+ httpFqdn=dict(),
+ httpFile=dict(),
+ httpQueryString=dict(),
failover=dict(default=False, type='bool'),
autoFailover=dict(default=False, type='bool'),
- ip1=dict(required=False),
- ip2=dict(required=False),
- ip3=dict(required=False),
- ip4=dict(required=False),
- ip5=dict(required=False),
+ ip1=dict(),
+ ip2=dict(),
+ ip3=dict(),
+ ip4=dict(),
+ ip5=dict(),
validate_certs=dict(default=True, type='bool'),
),
required_together=[
diff --git a/plugins/modules/dpkg_divert.py b/plugins/modules/dpkg_divert.py
index 90ce464ccd..6ef1f394e4 100644
--- a/plugins/modules/dpkg_divert.py
+++ b/plugins/modules/dpkg_divert.py
@@ -17,8 +17,8 @@ author:
- quidame (@quidame)
description:
- A diversion is for C(dpkg) the knowledge that only a given package (or the local administrator) is allowed to install
- a file at a given location. Other packages shipping their own version of this file will be forced to O(divert) it, that
- is to install it at another location. It allows one to keep changes in a file provided by a debian package by preventing
+ a file at a given location. Other packages shipping their own version of this file are forced to O(divert) it, that is
+ to install it at another location. It allows one to keep changes in a file provided by a debian package by preventing
it being overwritten on package upgrade.
- This module manages diversions of debian packages files using the C(dpkg-divert) commandline tool. It can either create
or remove a diversion for a given file, but also update an existing diversion to modify its O(holder) and/or its O(divert)
@@ -54,7 +54,7 @@ options:
type: str
divert:
description:
- - The location where the versions of file will be diverted.
+ - The location where the versions of file are diverted.
- Default is to add suffix C(.distrib) to the file path.
- This parameter is ignored when O(state=absent).
type: path
@@ -70,7 +70,7 @@ options:
force:
description:
- When O(rename=true) and O(force=true), renaming is performed even if the target of the renaming exists, in other words
- the existing contents of the file at this location will be lost.
+ the existing contents of the file at this location are lost.
- This parameter is ignored when O(rename=false).
type: bool
default: false
@@ -132,7 +132,13 @@ diversion:
state:
description: The state of the diversion.
type: str
- sample: {"divert": "/etc/foobarrc.distrib", "holder": "LOCAL", "path": "/etc/foobarrc", "state": "present"}
+ sample:
+ {
+ "divert": "/etc/foobarrc.distrib",
+ "holder": "LOCAL",
+ "path": "/etc/foobarrc",
+ "state": "present"
+ }
"""
@@ -160,11 +166,11 @@ def main():
module = AnsibleModule(
argument_spec=dict(
path=dict(required=True, type='path'),
- state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
- holder=dict(required=False, type='str'),
- divert=dict(required=False, type='path'),
- rename=dict(required=False, type='bool', default=False),
- force=dict(required=False, type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ holder=dict(type='str'),
+ divert=dict(type='path'),
+ rename=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
),
supports_check_mode=True,
)
diff --git a/plugins/modules/easy_install.py b/plugins/modules/easy_install.py
index 734f0dc4df..8d0a39333e 100644
--- a/plugins/modules/easy_install.py
+++ b/plugins/modules/easy_install.py
@@ -33,8 +33,8 @@ options:
- An optional O(virtualenv) directory path to install into. If the O(virtualenv) does not exist, it is created automatically.
virtualenv_site_packages:
description:
- - Whether the virtual environment will inherit packages from the global site-packages directory. Note that if this setting
- is changed on an already existing virtual environment it will not have any effect, the environment must be deleted
+ - Whether the virtual environment inherits packages from the global site-packages directory. Note that this setting
+ has no effect on an already existing virtual environment, so if you want to change it, the environment must be deleted
and newly created.
type: bool
default: false
@@ -74,6 +74,12 @@ EXAMPLES = r"""
community.general.easy_install:
name: bottle
virtualenv: /webapps/myapp/venv
+
+- name: Install a python package using pyvenv as the virtualenv tool
+ community.general.easy_install:
+ name: package_name
+ virtualenv: /opt/myenv
+ virtualenv_command: pyvenv
"""
import os
@@ -127,14 +133,13 @@ def _get_easy_install(module, env=None, executable=None):
def main():
arg_spec = dict(
name=dict(required=True),
- state=dict(required=False,
- default='present',
+ state=dict(default='present',
choices=['present', 'latest'],
type='str'),
- virtualenv=dict(default=None, required=False),
+ virtualenv=dict(),
virtualenv_site_packages=dict(default=False, type='bool'),
- virtualenv_command=dict(default='virtualenv', required=False),
- executable=dict(default='easy_install', required=False),
+ virtualenv_command=dict(default='virtualenv'),
+ executable=dict(default='easy_install'),
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
diff --git a/plugins/modules/elasticsearch_plugin.py b/plugins/modules/elasticsearch_plugin.py
index 3455691cd0..8552b55ccd 100644
--- a/plugins/modules/elasticsearch_plugin.py
+++ b/plugins/modules/elasticsearch_plugin.py
@@ -66,7 +66,7 @@ options:
type: bool
plugin_bin:
description:
- - Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
+ - Location of the plugin binary. If this file is not found, the default plugin binaries are used.
type: path
plugin_dir:
description:
@@ -83,7 +83,7 @@ options:
type: str
version:
description:
- - Version of the plugin to be installed. If plugin exists with previous version, it will NOT be updated.
+ - Version of the plugin to be installed. If plugin exists with previous version, it is NOT updated.
type: str
"""
@@ -259,15 +259,15 @@ def main():
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
- src=dict(default=None),
- url=dict(default=None),
+ src=dict(),
+ url=dict(),
timeout=dict(default="1m"),
force=dict(type='bool', default=False),
plugin_bin=dict(type="path"),
plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
- proxy_host=dict(default=None),
- proxy_port=dict(default=None),
- version=dict(default=None)
+ proxy_host=dict(),
+ proxy_port=dict(),
+ version=dict()
),
mutually_exclusive=[("src", "url")],
supports_check_mode=True
diff --git a/plugins/modules/emc_vnx_sg_member.py b/plugins/modules/emc_vnx_sg_member.py
index bdb86625d1..a0b1e920e2 100644
--- a/plugins/modules/emc_vnx_sg_member.py
+++ b/plugins/modules/emc_vnx_sg_member.py
@@ -77,7 +77,7 @@ EXAMPLES = r"""
RETURN = r"""
hluid:
- description: LUNID that hosts attached to the storage group will see.
+ description: LUNID visible to hosts attached to the storage group.
type: int
returned: success
"""
diff --git a/plugins/modules/facter.py b/plugins/modules/facter.py
index ce9320282d..20be3d4a4d 100644
--- a/plugins/modules/facter.py
+++ b/plugins/modules/facter.py
@@ -62,7 +62,7 @@ from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
- arguments=dict(required=False, type='list', elements='str')
+ arguments=dict(type='list', elements='str')
)
)
diff --git a/plugins/modules/filesystem.py b/plugins/modules/filesystem.py
index 2edc8be5ab..f14458c337 100644
--- a/plugins/modules/filesystem.py
+++ b/plugins/modules/filesystem.py
@@ -64,10 +64,10 @@ options:
description:
- If V(true), if the block device and filesystem size differ, grow the filesystem into the space.
- Supported for C(bcachefs), C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat)
- filesystems. Attempts to resize other filesystem types will fail.
- - XFS Will only grow if mounted. Currently, the module is based on commands from C(util-linux) package to perform operations,
+ filesystems. Attempts to resize other filesystem types fail.
+ - XFS only grows if mounted. Currently, the module is based on commands from C(util-linux) package to perform operations,
so resizing of XFS is not supported on FreeBSD systems.
- - VFAT will likely fail if C(fatresize < 1.04).
+ - VFAT is likely to fail if C(fatresize < 1.04).
- Mutually exclusive with O(uuid).
type: bool
default: false
@@ -82,7 +82,7 @@ options:
- See xfs_admin(8) (C(xfs)), tune2fs(8) (C(ext2), C(ext3), C(ext4), C(ext4dev)) for possible values.
- For O(fstype=lvm) the value is ignored, it resets the PV UUID if set.
- Supported for O(fstype) being one of C(bcachefs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(lvm), or C(xfs).
- - This is B(not idempotent). Specifying this option will always result in a change.
+ - This is B(not idempotent). Specifying this option always results in a change.
- Mutually exclusive with O(resizefs).
type: str
version_added: 7.1.0
@@ -633,7 +633,7 @@ def main():
opts=dict(type='str'),
force=dict(type='bool', default=False),
resizefs=dict(type='bool', default=False),
- uuid=dict(type='str', required=False),
+ uuid=dict(type='str'),
),
required_if=[
('state', 'present', ['fstype'])
diff --git a/plugins/modules/flatpak.py b/plugins/modules/flatpak.py
index 13898c3349..98de9de3ed 100644
--- a/plugins/modules/flatpak.py
+++ b/plugins/modules/flatpak.py
@@ -27,7 +27,7 @@ attributes:
check_mode:
support: partial
details:
- - If O(state=latest), the module will always return C(changed=true).
+ - If O(state=latest), the module always returns RV(ignore:changed=true).
diff_mode:
support: none
options:
@@ -53,7 +53,7 @@ options:
- When supplying a reverse DNS name, you can use the O(remote) option to specify on what remote to look for the flatpak.
An example for a reverse DNS name is C(org.gnome.gedit).
- When used with O(state=absent) or O(state=latest), it is recommended to specify the name in the reverse DNS format.
- - When supplying a URL with O(state=absent) or O(state=latest), the module will try to match the installed flatpak based
+ - When supplying a URL with O(state=absent) or O(state=latest), the module tries to match the installed flatpak based
on the name of the flatpakref to remove or update it. However, there is no guarantee that the names of the flatpakref
file and the reverse DNS name of the installed flatpak do match.
type: list
@@ -107,6 +107,12 @@ EXAMPLES = r"""
state: present
remote: gnome
+- name: Install GIMP using custom flatpak binary path
+ community.general.flatpak:
+ name: org.gimp.GIMP
+ state: present
+ executable: /usr/local/bin/flatpak-dev
+
- name: Install multiple packages
community.general.flatpak:
name:
@@ -165,26 +171,6 @@ command:
returned: When a flatpak command has been executed
type: str
sample: "/usr/bin/flatpak install --user --nontinteractive flathub org.gnome.Calculator"
-msg:
- description: Module error message.
- returned: failure
- type: str
- sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
-rc:
- description: Return code from flatpak binary.
- returned: When a flatpak command has been executed
- type: int
- sample: 0
-stderr:
- description: Error output from flatpak binary.
- returned: When a flatpak command has been executed
- type: str
- sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE"
-stdout:
- description: Output from flatpak binary.
- returned: When a flatpak command has been executed
- type: str
- sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n"
"""
from ansible.module_utils.six.moves.urllib.parse import urlparse
diff --git a/plugins/modules/flatpak_remote.py b/plugins/modules/flatpak_remote.py
index ba202d3033..641ce930d0 100644
--- a/plugins/modules/flatpak_remote.py
+++ b/plugins/modules/flatpak_remote.py
@@ -17,7 +17,7 @@ description:
- Allows users to add or remove flatpak remotes.
- The flatpak remotes concept is comparable to what is called repositories in other packaging formats.
- Currently, remote addition is only supported using C(flatpakrepo) file URLs.
- - Existing remotes will not be updated.
+ - Existing remotes are not updated.
- See the M(community.general.flatpak) module for managing flatpaks.
author:
- John Kwiatkoski (@JayKayy)
@@ -56,8 +56,8 @@ options:
name:
description:
- The desired name for the flatpak remote to be registered under on the managed host.
- - When used with O(state=present), the remote will be added to the managed host under the specified O(name).
- - When used with O(state=absent) the remote with that name will be removed.
+ - When used with O(state=present), the remote is added to the managed host under the specified O(name).
+ - When used with O(state=absent) the remote with that name is removed.
type: str
required: true
state:
@@ -112,26 +112,6 @@ command:
returned: When a flatpak command has been executed
type: str
sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo"
-msg:
- description: Module error message.
- returned: failure
- type: str
- sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
-rc:
- description: Return code from flatpak binary.
- returned: When a flatpak command has been executed
- type: int
- sample: 0
-stderr:
- description: Error output from flatpak binary.
- returned: When a flatpak command has been executed
- type: str
- sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n"
-stdout:
- description: Output from flatpak binary.
- returned: When a flatpak command has been executed
- type: str
- sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py
index 86e878ed61..ef5b4381c2 100644
--- a/plugins/modules/gconftool2.py
+++ b/plugins/modules/gconftool2.py
@@ -128,7 +128,6 @@ class GConftool(StateModuleHelper):
],
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
self.runner = gconftool2_runner(self.module, check_rc=True)
diff --git a/plugins/modules/gconftool2_info.py b/plugins/modules/gconftool2_info.py
index 29965be46b..3f6aa7b2e3 100644
--- a/plugins/modules/gconftool2_info.py
+++ b/plugins/modules/gconftool2_info.py
@@ -67,7 +67,6 @@ class GConftoolInfo(ModuleHelper):
),
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
self.runner = gconftool2_runner(self.module, check_rc=True)
diff --git a/plugins/modules/gem.py b/plugins/modules/gem.py
index c01433cb90..1ea9c68a94 100644
--- a/plugins/modules/gem.py
+++ b/plugins/modules/gem.py
@@ -48,7 +48,7 @@ options:
repository:
type: str
description:
- - The repository from which the gem will be installed.
+ - The repository from which the gem is installed.
required: false
aliases: [source]
user_install:
@@ -65,7 +65,7 @@ options:
install_dir:
type: path
description:
- - Install the gems into a specific directory. These gems will be independent from the global installed ones. Specifying
+ - Install the gems into a specific directory. These gems are independent from the global installed ones. Specifying
this requires user_install to be false.
required: false
bindir:
@@ -295,22 +295,22 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- executable=dict(required=False, type='path'),
- gem_source=dict(required=False, type='path'),
- include_dependencies=dict(required=False, default=True, type='bool'),
+ executable=dict(type='path'),
+ gem_source=dict(type='path'),
+ include_dependencies=dict(default=True, type='bool'),
name=dict(required=True, type='str'),
- repository=dict(required=False, aliases=['source'], type='str'),
- state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'),
- user_install=dict(required=False, default=True, type='bool'),
- install_dir=dict(required=False, type='path'),
+ repository=dict(aliases=['source'], type='str'),
+ state=dict(default='present', choices=['present', 'absent', 'latest'], type='str'),
+ user_install=dict(default=True, type='bool'),
+ install_dir=dict(type='path'),
bindir=dict(type='path'),
norc=dict(type='bool', default=True),
- pre_release=dict(required=False, default=False, type='bool'),
- include_doc=dict(required=False, default=False, type='bool'),
- env_shebang=dict(required=False, default=False, type='bool'),
- version=dict(required=False, type='str'),
- build_flags=dict(required=False, type='str'),
- force=dict(required=False, default=False, type='bool'),
+ pre_release=dict(default=False, type='bool'),
+ include_doc=dict(default=False, type='bool'),
+ env_shebang=dict(default=False, type='bool'),
+ version=dict(type='str'),
+ build_flags=dict(type='str'),
+ force=dict(default=False, type='bool'),
),
supports_check_mode=True,
mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']],
diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py
index 216b7faae0..b8864ea3e3 100644
--- a/plugins/modules/gio_mime.py
+++ b/plugins/modules/gio_mime.py
@@ -26,12 +26,12 @@ attributes:
options:
mime_type:
description:
- - MIME type for which a default handler will be set.
+ - MIME type for which a default handler is set.
type: str
required: true
handler:
description:
- - Default handler will be set for the MIME type.
+ - Default handler set for the MIME type.
type: str
required: true
notes:
@@ -61,18 +61,6 @@ handler:
returned: success
type: str
sample: google-chrome.desktop
-stdout:
- description:
- - The output of the C(gio) command.
- returned: success
- type: str
- sample: Set google-chrome.desktop as the default for x-scheme-handler/https
-stderr:
- description:
- - The error output of the C(gio) command.
- returned: failure
- type: str
- sample: 'gio: Failed to load info for handler "never-existed.desktop"'
version:
description: Version of gio.
type: str
@@ -94,7 +82,6 @@ class GioMime(ModuleHelper):
),
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
self.runner = gio_mime_runner(self.module, check_rc=True)
diff --git a/plugins/modules/git_config.py b/plugins/modules/git_config.py
index 6a6eff0be2..93ca6265b9 100644
--- a/plugins/modules/git_config.py
+++ b/plugins/modules/git_config.py
@@ -31,17 +31,11 @@ attributes:
diff_mode:
support: none
options:
- list_all:
- description:
- - List all settings (optionally limited to a given O(scope)).
- - This option is B(deprecated) and will be removed from community.general 11.0.0. Please use M(community.general.git_config_info)
- instead.
- type: bool
- default: false
name:
description:
- - The name of the setting. If no value is supplied, the value will be read from the config if it has been set.
+ - The name of the setting.
type: str
+ required: true
repo:
description:
- Path to a git repository for reading and writing values from a specific repo.
@@ -57,7 +51,7 @@ options:
- This is required when setting config values.
- If this is set to V(local), you must also specify the O(repo) parameter.
- If this is set to V(file), you must also specify the O(file) parameter.
- - It defaults to system only when not using O(list_all=true).
+ - It defaults to system.
choices: ["file", "local", "global", "system"]
type: str
state:
@@ -70,7 +64,7 @@ options:
value:
description:
- When specifying the name of a single setting, supply a value to set that setting to the given value.
- - From community.general 11.0.0 on, O(value) will be required if O(state=present). To read values, use the M(community.general.git_config_info)
+ - From community.general 11.0.0 on, O(value) is required if O(state=present). To read values, use the M(community.general.git_config_info)
module instead.
type: str
add_mode:
@@ -144,21 +138,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-config_value:
- description: When O(list_all=false) and value is not set, a string containing the value of the setting in name.
- returned: success
- type: str
- sample: "vim"
-
-config_values:
- description: When O(list_all=true), a dict containing key/value pairs of multiple configuration settings.
- returned: success
- type: dict
- sample:
- core.editor: "vim"
- color.ui: "auto"
- alias.diffc: "diff --cached"
- alias.remotev: "remote -v"
"""
from ansible.module_utils.basic import AnsibleModule
@@ -167,21 +146,19 @@ from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
- list_all=dict(required=False, type='bool', default=False, removed_in_version='11.0.0', removed_from_collection='community.general'),
- name=dict(type='str'),
+ name=dict(type='str', required=True),
repo=dict(type='path'),
file=dict(type='path'),
- add_mode=dict(required=False, type='str', default='replace-all', choices=['add', 'replace-all']),
- scope=dict(required=False, type='str', choices=['file', 'local', 'global', 'system']),
- state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
- value=dict(required=False),
+ add_mode=dict(type='str', default='replace-all', choices=['add', 'replace-all']),
+ scope=dict(type='str', choices=['file', 'local', 'global', 'system']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ value=dict(),
),
- mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']],
required_if=[
('scope', 'local', ['repo']),
- ('scope', 'file', ['file'])
+ ('scope', 'file', ['file']),
+ ('state', 'present', ['value']),
],
- required_one_of=[['list_all', 'name']],
supports_check_mode=True,
)
git_path = module.get_bin_path('git', True)
@@ -196,13 +173,8 @@ def main():
new_value = params['value'] or ''
add_mode = params['add_mode']
- if not unset and not new_value and not params['list_all']:
- module.deprecate(
- 'If state=present, a value must be specified from community.general 11.0.0 on.'
- ' To read a config value, use the community.general.git_config_info module instead.',
- version='11.0.0',
- collection_name='community.general',
- )
+ if not unset and not new_value:
+ module.fail_json(msg="If state=present, a value must be specified. Use the community.general.git_config_info module to read a config value.")
scope = determine_scope(params)
cwd = determine_cwd(scope, params)
@@ -217,33 +189,18 @@ def main():
list_args = list(base_args)
- if params['list_all']:
- list_args.append('-l')
-
- if name:
- list_args.append("--get-all")
- list_args.append(name)
+ list_args.append("--get-all")
+ list_args.append(name)
(rc, out, err) = module.run_command(list_args, cwd=cwd, expand_user_and_vars=False)
- if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
- # This just means nothing has been set at the given scope
- module.exit_json(changed=False, msg='', config_values={})
- elif rc >= 2:
+ if rc >= 2:
# If the return code is 1, it just means the option hasn't been set yet, which is fine.
module.fail_json(rc=rc, msg=err, cmd=' '.join(list_args))
old_values = out.rstrip().splitlines()
- if params['list_all']:
- config_values = {}
- for value in old_values:
- k, v = value.split('=', 1)
- config_values[k] = v
- module.exit_json(changed=False, msg='', config_values=config_values)
- elif not new_value and not unset:
- module.exit_json(changed=False, msg='', config_value=old_values[0] if old_values else '')
- elif unset and not out:
+ if unset and not out:
module.exit_json(changed=False, msg='no setting to unset')
elif new_value in old_values and (len(old_values) == 1 or add_mode == "add") and not unset:
module.exit_json(changed=False, msg="")
@@ -286,30 +243,22 @@ def main():
def determine_scope(params):
if params['scope']:
return params['scope']
- elif params['list_all']:
- return ""
- else:
- return 'system'
+ return 'system'
def build_diff_value(value):
if not value:
return "\n"
- elif len(value) == 1:
+ if len(value) == 1:
return value[0] + "\n"
- else:
- return value
+ return value
def determine_cwd(scope, params):
if scope == 'local':
return params['repo']
- elif params['list_all'] and params['repo']:
- # Include local settings from a specific repo when listing all available settings
- return params['repo']
- else:
- # Run from root directory to avoid accidentally picking up any local config settings
- return "/"
+ # Run from root directory to avoid accidentally picking up any local config settings
+ return "/"
if __name__ == '__main__':
diff --git a/plugins/modules/git_config_info.py b/plugins/modules/git_config_info.py
index c8152cfa42..29922382de 100644
--- a/plugins/modules/git_config_info.py
+++ b/plugins/modules/git_config_info.py
@@ -26,7 +26,7 @@ options:
name:
description:
- The name of the setting to read.
- - If not provided, all settings will be returned as RV(config_values).
+ - If not provided, all settings are returned as RV(config_values).
type: str
path:
description:
@@ -94,8 +94,8 @@ config_values:
description:
- This is a dictionary mapping a git configuration setting to a list of its values.
- When O(name) is not set, all configuration settings are returned here.
- - When O(name) is set, only the setting specified in O(name) is returned here. If that setting is not set, the key will
- still be present, and its value will be an empty list.
+ - When O(name) is set, only the setting specified in O(name) is returned here. If that setting is not set, the key is
+ still present, and its value is an empty list.
returned: success
type: dict
sample:
@@ -113,7 +113,7 @@ def main():
argument_spec=dict(
name=dict(type="str"),
path=dict(type="path"),
- scope=dict(required=False, type="str", default="system", choices=["global", "system", "local", "file"]),
+ scope=dict(type="str", default="system", choices=["global", "system", "local", "file"]),
),
required_if=[
("scope", "local", ["path"]),
diff --git a/plugins/modules/github_deploy_key.py b/plugins/modules/github_deploy_key.py
index 2e5f9125ad..4ec7fbb769 100644
--- a/plugins/modules/github_deploy_key.py
+++ b/plugins/modules/github_deploy_key.py
@@ -57,8 +57,8 @@ options:
type: str
read_only:
description:
- - If V(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to
- read and write.
+ - If V(true), the deploy key is only able to read repository contents. Otherwise, the deploy key is able to read and
+ write.
type: bool
default: true
state:
@@ -301,18 +301,18 @@ class GithubDeployKey(object):
def main():
module = AnsibleModule(
argument_spec=dict(
- github_url=dict(required=False, type='str', default="https://api.github.com"),
+ github_url=dict(type='str', default="https://api.github.com"),
owner=dict(required=True, type='str', aliases=['account', 'organization']),
repo=dict(required=True, type='str', aliases=['repository']),
name=dict(required=True, type='str', aliases=['title', 'label']),
key=dict(required=True, type='str', no_log=False),
- read_only=dict(required=False, type='bool', default=True),
+ read_only=dict(type='bool', default=True),
state=dict(default='present', choices=['present', 'absent']),
- force=dict(required=False, type='bool', default=False),
- username=dict(required=False, type='str'),
- password=dict(required=False, type='str', no_log=True),
- otp=dict(required=False, type='int', no_log=True),
- token=dict(required=False, type='str', no_log=True)
+ force=dict(type='bool', default=False),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ otp=dict(type='int', no_log=True),
+ token=dict(type='str', no_log=True)
),
mutually_exclusive=[
['password', 'token']
diff --git a/plugins/modules/github_key.py b/plugins/modules/github_key.py
index f3d5863d54..80b0a6bf70 100644
--- a/plugins/modules/github_key.py
+++ b/plugins/modules/github_key.py
@@ -14,6 +14,7 @@ module: github_key
short_description: Manage GitHub access keys
description:
- Creates, removes, or updates GitHub access keys.
+ - Works with both GitHub.com and GitHub Enterprise Server installations.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -44,10 +45,16 @@ options:
type: str
force:
description:
- - The default is V(true), which will replace the existing remote key if it is different than O(pubkey). If V(false),
- the key will only be set if no key with the given O(name) exists.
+ - The default is V(true), which replaces the existing remote key if it is different than O(pubkey). If V(false), the
+ key is only set if no key with the given O(name) exists.
type: bool
default: true
+ api_url:
+ description:
+ - URL to the GitHub API if not using github.com but your own GitHub Enterprise instance.
+ type: str
+ default: 'https://api.github.com'
+ version_added: "11.0.0"
author: Robert Estelle (@erydo)
"""
@@ -57,20 +64,42 @@ deleted_keys:
description: An array of key objects that were deleted. Only present on state=absent.
type: list
returned: When state=absent
- sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ',
- 'read_only': false}]
+ sample:
+ [
+ {
+ "id": 0,
+ "key": "BASE64 encoded key",
+ "url": "http://example.com/github key",
+ "created_at": "YYYY-MM-DDTHH:MM:SZ",
+ "read_only": false
+ }
+ ]
matching_keys:
description: An array of keys matching the specified name. Only present on state=present.
type: list
returned: When state=present
- sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ',
- 'read_only': false}]
+ sample:
+ [
+ {
+ "id": 0,
+ "key": "BASE64 encoded key",
+ "url": "http://example.com/github key",
+ "created_at": "YYYY-MM-DDTHH:MM:SZ",
+ "read_only": false
+ }
+ ]
key:
description: Metadata about the key just created. Only present on state=present.
type: dict
returned: success
- sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ',
- 'read_only': false}
+ sample:
+ {
+ "id": 0,
+ "key": "BASE64 encoded key",
+ "url": "http://example.com/github key",
+ "created_at": "YYYY-MM-DDTHH:MM:SZ",
+ "read_only": false
+ }
"""
EXAMPLES = r"""
@@ -91,6 +120,14 @@ EXAMPLES = r"""
name: Access Key for Some Machine
token: '{{ github_access_token }}'
pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}"
+
+# GitHub Enterprise Server usage
+- name: Authorize key with GitHub Enterprise
+ community.general.github_key:
+ name: Access Key for Some Machine
+ token: '{{ github_enterprise_token }}'
+ pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}"
+ api_url: 'https://github.company.com/api/v3'
"""
import datetime
@@ -105,9 +142,6 @@ from ansible_collections.community.general.plugins.module_utils.datetime import
)
-API_BASE = 'https://api.github.com'
-
-
class GitHubResponse(object):
def __init__(self, response, info):
self.content = response.read()
@@ -127,9 +161,10 @@ class GitHubResponse(object):
class GitHubSession(object):
- def __init__(self, module, token):
+ def __init__(self, module, token, api_url):
self.module = module
self.token = token
+ self.api_url = api_url.rstrip('/')
def request(self, method, url, data=None):
headers = {
@@ -147,7 +182,7 @@ class GitHubSession(object):
def get_all_keys(session):
- url = API_BASE + '/user/keys'
+ url = session.api_url + '/user/keys'
result = []
while url:
r = session.request('GET', url)
@@ -171,7 +206,7 @@ def create_key(session, name, pubkey, check_mode):
else:
return session.request(
'POST',
- API_BASE + '/user/keys',
+ session.api_url + '/user/keys',
data=json.dumps({'title': name, 'key': pubkey})).json()
@@ -180,7 +215,7 @@ def delete_keys(session, to_delete, check_mode):
return
for key in to_delete:
- session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"])
+ session.request('DELETE', session.api_url + '/user/keys/%s' % key["id"])
def ensure_key_absent(session, name, check_mode):
@@ -228,6 +263,7 @@ def main():
'pubkey': {},
'state': {'choices': ['present', 'absent'], 'default': 'present'},
'force': {'default': True, 'type': 'bool'},
+ 'api_url': {'default': 'https://api.github.com', 'type': 'str'},
}
module = AnsibleModule(
argument_spec=argument_spec,
@@ -239,6 +275,7 @@ def main():
state = module.params['state']
force = module.params['force']
pubkey = module.params.get('pubkey')
+ api_url = module.params.get('api_url')
if pubkey:
pubkey_parts = pubkey.split(' ')
@@ -248,7 +285,7 @@ def main():
elif state == 'present':
module.fail_json(msg='"pubkey" is required when state=present')
- session = GitHubSession(module, token)
+ session = GitHubSession(module, token, api_url)
if state == 'present':
result = ensure_key_present(module, session, name, pubkey, force=force,
check_mode=module.check_mode)
diff --git a/plugins/modules/github_release.py b/plugins/modules/github_release.py
index 1376bf4f3d..eae2081701 100644
--- a/plugins/modules/github_release.py
+++ b/plugins/modules/github_release.py
@@ -182,13 +182,29 @@ def main():
else:
gh_obj = github3.GitHub()
- # test if we're actually logged in
- if password or login_token:
+ # GitHub's token formats:
+ # - ghp_ - Personal access token (classic)
+ # - github_pat_ - Fine-grained personal access token
+ # - gho_ - OAuth access token
+ # - ghu_ - User access token for a GitHub App
+ # - ghs_ - Installation access token for a GitHub App
+ # - ghr_ - Refresh token for a GitHub App
+ #
+ # References:
+ # https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-authentication-to-github#githubs-token-formats
+ #
+ # Test if we're actually logged in, but skip this check for some token prefixes
+ SKIPPED_TOKEN_PREFIXES = ['ghs_']
+ if password or (login_token and not any(login_token.startswith(prefix) for prefix in SKIPPED_TOKEN_PREFIXES)):
gh_obj.me()
except github3.exceptions.AuthenticationFailed as e:
module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e),
details="Please check username and password or token "
"for repository %s" % repo)
+ except github3.exceptions.GitHubError as e:
+ module.fail_json(msg='GitHub API error: %s' % to_native(e),
+ details="Please check username and password or token "
+ "for repository %s" % repo)
repository = gh_obj.repository(user, repo)
diff --git a/plugins/modules/github_repo.py b/plugins/modules/github_repo.py
index 2d2c6f8588..abaddb3c28 100644
--- a/plugins/modules/github_repo.py
+++ b/plugins/modules/github_repo.py
@@ -72,7 +72,7 @@ options:
organization:
description:
- Organization for the repository.
- - When O(state=present), the repository will be created in the current user profile.
+ - When O(state=present), the repository is created in the current user profile.
type: str
required: false
api_url:
@@ -246,12 +246,12 @@ def main():
password=dict(type='str', no_log=True),
access_token=dict(type='str', no_log=True),
name=dict(type='str', required=True),
- state=dict(type='str', required=False, default="present",
+ state=dict(type='str', default="present",
choices=["present", "absent"]),
- organization=dict(type='str', required=False, default=None),
+ organization=dict(type='str', ),
private=dict(type='bool'),
description=dict(type='str'),
- api_url=dict(type='str', required=False, default='https://api.github.com'),
+ api_url=dict(type='str', default='https://api.github.com'),
force_defaults=dict(type='bool', default=True),
)
module = AnsibleModule(
diff --git a/plugins/modules/github_webhook.py b/plugins/modules/github_webhook.py
index 8608c90bc9..1ae2e71aaa 100644
--- a/plugins/modules/github_webhook.py
+++ b/plugins/modules/github_webhook.py
@@ -32,7 +32,7 @@ options:
- repo
url:
description:
- - URL to which payloads will be delivered.
+ - URL to which payloads are delivered.
type: str
required: true
content_type:
@@ -208,25 +208,16 @@ def main():
argument_spec=dict(
repository=dict(type='str', required=True, aliases=['repo']),
url=dict(type='str', required=True),
- content_type=dict(
- type='str',
- choices=('json', 'form'),
- required=False,
- default='form'),
- secret=dict(type='str', required=False, no_log=True),
- insecure_ssl=dict(type='bool', required=False, default=False),
- events=dict(type='list', elements='str', required=False),
- active=dict(type='bool', required=False, default=True),
- state=dict(
- type='str',
- required=False,
- choices=('absent', 'present'),
- default='present'),
+ content_type=dict(type='str', choices=('json', 'form'), default='form'),
+ secret=dict(type='str', no_log=True),
+ insecure_ssl=dict(type='bool', default=False),
+ events=dict(type='list', elements='str', ),
+ active=dict(type='bool', default=True),
+ state=dict(type='str', choices=('absent', 'present'), default='present'),
user=dict(type='str', required=True),
- password=dict(type='str', required=False, no_log=True),
- token=dict(type='str', required=False, no_log=True),
- github_url=dict(
- type='str', required=False, default="https://api.github.com")),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ github_url=dict(type='str', default="https://api.github.com")),
mutually_exclusive=(('password', 'token'),),
required_one_of=(("password", "token"),),
required_if=(("state", "present", ("events",)),),
diff --git a/plugins/modules/github_webhook_info.py b/plugins/modules/github_webhook_info.py
index 440a373f1d..75315c77aa 100644
--- a/plugins/modules/github_webhook_info.py
+++ b/plugins/modules/github_webhook_info.py
@@ -76,16 +76,17 @@ hooks:
type: list
elements: dict
sample:
- - {
- "has_shared_secret": true,
- "url": "https://jenkins.example.com/ghprbhook/",
- "events": ["issue_comment", "pull_request"],
- "insecure_ssl": "1",
- "content_type": "json",
- "active": true,
- "id": 6206,
- "last_response": {"status": "active", "message": "OK", "code": 200}
- }
+ - has_shared_secret: true
+ url: https://jenkins.example.com/ghprbhook/
+ events: [issue_comment, pull_request]
+ insecure_ssl: "1"
+ content_type: json
+ active: true
+ id: 6206
+ last_response:
+ status: active
+ message: OK
+ code: 200
"""
import traceback
@@ -123,10 +124,10 @@ def main():
argument_spec=dict(
repository=dict(type='str', required=True, aliases=["repo"]),
user=dict(type='str', required=True),
- password=dict(type='str', required=False, no_log=True),
- token=dict(type='str', required=False, no_log=True),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
github_url=dict(
- type='str', required=False, default="https://api.github.com")),
+ type='str', default="https://api.github.com")),
mutually_exclusive=(('password', 'token'), ),
required_one_of=(("password", "token"), ),
supports_check_mode=True)
diff --git a/plugins/modules/gitlab_branch.py b/plugins/modules/gitlab_branch.py
index b32169ef5a..6ed6e6a0c5 100644
--- a/plugins/modules/gitlab_branch.py
+++ b/plugins/modules/gitlab_branch.py
@@ -118,7 +118,7 @@ def main():
argument_spec.update(
project=dict(type='str', required=True),
branch=dict(type='str', required=True),
- ref_branch=dict(type='str', required=False),
+ ref_branch=dict(type='str'),
state=dict(type='str', default="present", choices=["absent", "present"]),
)
diff --git a/plugins/modules/gitlab_deploy_key.py b/plugins/modules/gitlab_deploy_key.py
index f5ae130324..d116df0714 100644
--- a/plugins/modules/gitlab_deploy_key.py
+++ b/plugins/modules/gitlab_deploy_key.py
@@ -55,8 +55,8 @@ options:
default: false
state:
description:
- - When V(present) the deploy key added to the project if it does not exist.
- - When V(absent) it will be removed from the project if it exists.
+ - When V(present) the deploy key is added to the project if it does not exist.
+ - When V(absent) it is removed from the project if it exists.
default: present
type: str
choices: ["present", "absent"]
@@ -208,7 +208,7 @@ class GitLabDeployKey(object):
'''
def find_deploy_key(self, project, key_title):
for deploy_key in project.keys.list(**list_all_kwargs):
- if (deploy_key.title == key_title):
+ if deploy_key.title == key_title:
return deploy_key
'''
diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py
index 6d03476092..d6105642b8 100644
--- a/plugins/modules/gitlab_group.py
+++ b/plugins/modules/gitlab_group.py
@@ -13,8 +13,8 @@ DOCUMENTATION = r"""
module: gitlab_group
short_description: Creates/updates/deletes GitLab Groups
description:
- - When the group does not exist in GitLab, it will be created.
- - When the group does exist and state=absent, the group will be deleted.
+ - When the group does not exist in GitLab, it is created.
+ - When the group does exist and O(state=absent), the group is deleted.
author:
- Werner Dijkerman (@dj-wasabi)
- Guillaume Martinez (@Lunik)
@@ -101,8 +101,8 @@ options:
type: str
path:
description:
- - The path of the group you want to create, this will be api_url/group_path.
- - If not supplied, the group_name will be used.
+ - The path of the group you want to create, this is O(api_url)/O(path).
+ - If not supplied, O(name) is used.
type: str
prevent_forking_outside_group:
description:
@@ -129,7 +129,7 @@ options:
service_access_tokens_expiration_enforced:
description:
- Service account token expiration.
- - Changes will not affect existing token expiration dates.
+ - Changes do not affect existing token expiration dates.
- Only available for top level groups.
type: bool
version_added: 9.5.0
diff --git a/plugins/modules/gitlab_group_access_token.py b/plugins/modules/gitlab_group_access_token.py
index bcf75e056b..0fe6c14af2 100644
--- a/plugins/modules/gitlab_group_access_token.py
+++ b/plugins/modules/gitlab_group_access_token.py
@@ -28,7 +28,7 @@ extends_documentation_fragment:
- community.general.attributes
notes:
- Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. Whether tokens
- will be recreated is controlled by the O(recreate) option, which defaults to V(never).
+ are recreated or not is controlled by the O(recreate) option, which defaults to V(never).
- Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards.
- Token matching is done by comparing O(name) option.
attributes:
@@ -55,8 +55,16 @@ options:
type: list
elements: str
aliases: ["scope"]
- choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner",
- "ai_features", "k8s_proxy"]
+ choices:
+ - api
+ - read_api
+ - read_registry
+ - write_registry
+ - read_repository
+ - write_repository
+ - create_runner
+ - ai_features
+ - k8s_proxy
access_level:
description:
- Access level of the access token.
@@ -71,17 +79,17 @@ options:
required: true
recreate:
description:
- - Whether the access token will be recreated if it already exists.
- - When V(never) the token will never be recreated.
- - When V(always) the token will always be recreated.
- - When V(state_change) the token will be recreated if there is a difference between desired state and actual state.
+ - Whether the access token is recreated if it already exists.
+ - When V(never) the token is never recreated.
+ - When V(always) the token is always recreated.
+ - When V(state_change) the token is recreated if there is a difference between desired state and actual state.
type: str
choices: ["never", "always", "state_change"]
default: never
state:
description:
- - When V(present) the access token will be added to the group if it does not exist.
- - When V(absent) it will be removed from the group if it exists.
+ - When V(present) the access token is added to the group if it does not exist.
+ - When V(absent) it is removed from the group if it exists.
default: present
type: str
choices: ["present", "absent"]
@@ -185,9 +193,9 @@ class GitLabGroupAccessToken(object):
@param name of the access token
'''
def find_access_token(self, group, name):
- access_tokens = group.access_tokens.list(all=True)
+ access_tokens = [x for x in group.access_tokens.list(all=True) if not getattr(x, 'revoked', False)]
for access_token in access_tokens:
- if (access_token.name == name):
+ if access_token.name == name:
self.access_token_object = access_token
return False
return False
@@ -237,7 +245,7 @@ def main():
'create_runner',
'ai_features',
'k8s_proxy']),
- access_level=dict(type='str', required=False, default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
+ access_level=dict(type='str', default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
expires_at=dict(type='str', required=True),
recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change'])
))
diff --git a/plugins/modules/gitlab_group_variable.py b/plugins/modules/gitlab_group_variable.py
index 926f4fe20a..10ca467bcd 100644
--- a/plugins/modules/gitlab_group_variable.py
+++ b/plugins/modules/gitlab_group_variable.py
@@ -15,9 +15,9 @@ short_description: Creates, updates, or deletes GitLab groups variables
version_added: 1.2.0
description:
- Creates a group variable if it does not exist.
- - When a group variable does exist, its value will be updated when the values are different.
+ - When a group variable does exist, its value is updated when the values are different.
- Variables which are untouched in the playbook, but are not untouched in the GitLab group, they stay untouched (O(purge=false))
- or will be deleted (O(purge=true)).
+ or are deleted (O(purge=true)).
author:
- Florent Madiot (@scodeman)
requirements:
@@ -52,7 +52,7 @@ options:
type: bool
vars:
description:
- - When the list element is a simple key-value pair, masked, raw and protected will be set to false.
+ - When the list element is a simple key-value pair, masked, raw and protected are set to V(false).
- When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can have full
control about whether a value should be masked, raw, protected or both.
- Support for group variables requires GitLab >= 9.5.
@@ -185,22 +185,22 @@ group_variable:
description: A list of variables which were created.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
untouched:
description: A list of variables which exist.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
removed:
description: A list of variables which were deleted.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
updated:
description: A list of variables whose values were changed.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
"""
from ansible.module_utils.basic import AnsibleModule
@@ -372,11 +372,11 @@ def main():
argument_spec.update(auth_argument_spec())
argument_spec.update(
group=dict(type='str', required=True),
- purge=dict(type='bool', required=False, default=False),
- vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ purge=dict(type='bool', default=False),
+ vars=dict(type='dict', default=dict(), no_log=True),
# please mind whenever changing the variables dict to also change module_utils/gitlab.py's
# KNOWN dict in filter_returned_variables or bad evil will happen
- variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
+ variables=dict(type='list', elements='dict', default=list(), options=dict(
name=dict(type='str', required=True),
value=dict(type='str', no_log=True),
masked=dict(type='bool', default=False),
diff --git a/plugins/modules/gitlab_hook.py b/plugins/modules/gitlab_hook.py
index cb132c8aaa..87c8aa635a 100644
--- a/plugins/modules/gitlab_hook.py
+++ b/plugins/modules/gitlab_hook.py
@@ -45,8 +45,8 @@ options:
type: str
state:
description:
- - When V(present) the hook will be updated to match the input or created if it does not exist.
- - When V(absent) hook will be deleted if it exists.
+ - When V(present) the hook is updated to match the input or created if it does not exist.
+ - When V(absent) the hook is deleted if it exists.
default: present
type: str
choices: ["present", "absent"]
@@ -103,15 +103,15 @@ options:
version_added: '8.4.0'
hook_validate_certs:
description:
- - Whether GitLab will do SSL verification when triggering the hook.
+ - Whether GitLab performs SSL verification when triggering the hook.
type: bool
default: false
aliases: [enable_ssl_verification]
token:
description:
- Secret token to validate hook messages at the receiver.
- - If this is present it will always result in a change as it cannot be retrieved from GitLab.
- - Will show up in the X-GitLab-Token HTTP request header.
+ - If this is present it always results in a change as it cannot be retrieved from GitLab.
+ - It shows up in the C(X-GitLab-Token) HTTP request header.
required: false
type: str
"""
@@ -271,7 +271,7 @@ class GitLabHook(object):
'''
def find_hook(self, project, hook_url):
for hook in project.hooks.list(**list_all_kwargs):
- if (hook.url == hook_url):
+ if hook.url == hook_url:
return hook
'''
@@ -307,7 +307,7 @@ def main():
job_events=dict(type='bool', default=False),
pipeline_events=dict(type='bool', default=False),
wiki_page_events=dict(type='bool', default=False),
- releases_events=dict(type='bool', default=None),
+ releases_events=dict(type='bool'),
hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']),
token=dict(type='str', no_log=True),
))
diff --git a/plugins/modules/gitlab_instance_variable.py b/plugins/modules/gitlab_instance_variable.py
index 2023b0ad7d..0f2c9b7752 100644
--- a/plugins/modules/gitlab_instance_variable.py
+++ b/plugins/modules/gitlab_instance_variable.py
@@ -16,10 +16,10 @@ short_description: Creates, updates, or deletes GitLab instance variables
version_added: 7.1.0
description:
- Creates a instance variable if it does not exist.
- - When a instance variable does exist, its value will be updated if the values are different.
+ - When an instance variable does exist, its value is updated if the values are different.
- Support for instance variables requires GitLab >= 13.0.
- - Variables which are not mentioned in the modules options, but are present on the GitLab instance, will either stay (O(purge=false))
- or will be deleted (O(purge=true)).
+ - Variables which are not mentioned in the module's options, but are present on the GitLab instance, either stay (O(purge=false))
+ or are deleted (O(purge=true)).
author:
- Benedikt Braunger (@benibr)
requirements:
@@ -124,22 +124,22 @@ instance_variable:
description: A list of variables which were created.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
untouched:
description: A list of variables which exist.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
removed:
description: A list of variables which were deleted.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
updated:
description: A list pre-existing variables whose values have been set.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
"""
from ansible.module_utils.basic import AnsibleModule
@@ -301,8 +301,8 @@ def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(auth_argument_spec())
argument_spec.update(
- purge=dict(type='bool', required=False, default=False),
- variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
+ purge=dict(type='bool', default=False),
+ variables=dict(type='list', elements='dict', default=list(), options=dict(
name=dict(type='str', required=True),
value=dict(type='str', no_log=True),
masked=dict(type='bool', default=False),
diff --git a/plugins/modules/gitlab_issue.py b/plugins/modules/gitlab_issue.py
index 47b6f072e8..c6bf6f8328 100644
--- a/plugins/modules/gitlab_issue.py
+++ b/plugins/modules/gitlab_issue.py
@@ -18,8 +18,8 @@ short_description: Create, update, or delete GitLab issues
version_added: '8.1.0'
description:
- Creates an issue if it does not exist.
- - When an issue does exist, it will be updated if the provided parameters are different.
- - When an issue does exist and O(state=absent), the issue will be deleted.
+ - When an issue does exist, it is updated if the provided parameters are different.
+ - When an issue does exist and O(state=absent), the issue is deleted.
- When multiple issues are detected, the task fails.
- Existing issues are matched based on O(title) and O(state_filter) filters.
author:
@@ -284,13 +284,13 @@ def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(auth_argument_spec())
argument_spec.update(
- assignee_ids=dict(type='list', elements='str', required=False),
- description=dict(type='str', required=False),
- description_path=dict(type='path', required=False),
- issue_type=dict(type='str', default='issue', choices=["issue", "incident", "test_case"], required=False),
- labels=dict(type='list', elements='str', required=False),
- milestone_search=dict(type='str', required=False),
- milestone_group_id=dict(type='str', required=False),
+ assignee_ids=dict(type='list', elements='str'),
+ description=dict(type='str'),
+ description_path=dict(type='path'),
+ issue_type=dict(type='str', default='issue', choices=["issue", "incident", "test_case"]),
+ labels=dict(type='list', elements='str'),
+ milestone_search=dict(type='str'),
+ milestone_group_id=dict(type='str'),
project=dict(type='str', required=True),
state=dict(type='str', default="present", choices=["absent", "present"]),
state_filter=dict(type='str', default="opened", choices=["opened", "closed"]),
diff --git a/plugins/modules/gitlab_label.py b/plugins/modules/gitlab_label.py
index 8b9503e325..a139d1fcbd 100644
--- a/plugins/modules/gitlab_label.py
+++ b/plugins/modules/gitlab_label.py
@@ -12,8 +12,8 @@ module: gitlab_label
short_description: Creates/updates/deletes GitLab Labels belonging to project or group
version_added: 8.3.0
description:
- - When a label does not exist, it will be created.
- - When a label does exist, its value will be updated when the values are different.
+ - When a label does not exist, it is created.
+ - When a label does exist, its value is updated when the values are different.
- Labels can be purged.
author:
- "Gabriele Pongelli (@gpongelli)"
@@ -197,22 +197,22 @@ labels:
description: A list of labels which were created.
returned: always
type: list
- sample: ['abcd', 'label-one']
+ sample: ["abcd", "label-one"]
untouched:
description: A list of labels which exist.
returned: always
type: list
- sample: ['defg', 'new-label']
+ sample: ["defg", "new-label"]
removed:
description: A list of labels which were deleted.
returned: always
type: list
- sample: ['defg', 'new-label']
+ sample: ["defg", "new-label"]
updated:
description: A list pre-existing labels whose values have been set.
returned: always
type: list
- sample: ['defg', 'new-label']
+ sample: ["defg", "new-label"]
labels_obj:
description: API object.
returned: success
@@ -410,16 +410,16 @@ def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(auth_argument_spec())
argument_spec.update(
- project=dict(type='str', required=False, default=None),
- group=dict(type='str', required=False, default=None),
- purge=dict(type='bool', required=False, default=False),
- labels=dict(type='list', elements='dict', required=False, default=list(),
+ project=dict(type='str'),
+ group=dict(type='str'),
+ purge=dict(type='bool', default=False),
+ labels=dict(type='list', elements='dict', default=list(),
options=dict(
name=dict(type='str', required=True),
- color=dict(type='str', required=False),
- description=dict(type='str', required=False),
- priority=dict(type='int', required=False),
- new_name=dict(type='str', required=False),)
+ color=dict(type='str'),
+ description=dict(type='str'),
+ priority=dict(type='int'),
+ new_name=dict(type='str'),)
),
state=dict(type='str', default="present", choices=["absent", "present"]),
)
diff --git a/plugins/modules/gitlab_merge_request.py b/plugins/modules/gitlab_merge_request.py
index fd6068980a..922b224c1f 100644
--- a/plugins/modules/gitlab_merge_request.py
+++ b/plugins/modules/gitlab_merge_request.py
@@ -18,8 +18,8 @@ short_description: Create, update, or delete GitLab merge requests
version_added: 7.1.0
description:
- Creates a merge request if it does not exist.
- - When a single merge request does exist, it will be updated if the provided parameters are different.
- - When a single merge request does exist and O(state=absent), the merge request will be deleted.
+ - When a single merge request does exist, it is updated if the provided parameters are different.
+ - When a single merge request does exist and O(state=absent), the merge request is deleted.
- When multiple merge requests are detected, the task fails.
- Existing merge requests are matched based on O(title), O(source_branch), O(target_branch), and O(state_filter) filters.
author:
@@ -287,13 +287,13 @@ def main():
source_branch=dict(type='str', required=True),
target_branch=dict(type='str', required=True),
title=dict(type='str', required=True),
- description=dict(type='str', required=False),
- labels=dict(type='str', default="", required=False),
- description_path=dict(type='path', required=False),
- remove_source_branch=dict(type='bool', default=False, required=False),
+ description=dict(type='str'),
+ labels=dict(type='str', default=""),
+ description_path=dict(type='path'),
+ remove_source_branch=dict(type='bool', default=False),
state_filter=dict(type='str', default="opened", choices=["opened", "closed", "locked", "merged"]),
- assignee_ids=dict(type='str', required=False),
- reviewer_ids=dict(type='str', required=False),
+ assignee_ids=dict(type='str'),
+ reviewer_ids=dict(type='str'),
state=dict(type='str', default="present", choices=["absent", "present"]),
)
diff --git a/plugins/modules/gitlab_milestone.py b/plugins/modules/gitlab_milestone.py
index 99b922c4dd..1406f96ffb 100644
--- a/plugins/modules/gitlab_milestone.py
+++ b/plugins/modules/gitlab_milestone.py
@@ -12,8 +12,8 @@ module: gitlab_milestone
short_description: Creates/updates/deletes GitLab Milestones belonging to project or group
version_added: 8.3.0
description:
- - When a milestone does not exist, it will be created.
- - When a milestone does exist, its value will be updated when the values are different.
+ - When a milestone does not exist, it is created.
+ - When a milestone does exist, its value is updated when the values are different.
- Milestones can be purged.
author:
- "Gabriele Pongelli (@gpongelli)"
@@ -181,22 +181,22 @@ milestones:
description: A list of milestones which were created.
returned: always
type: list
- sample: ['abcd', 'milestone-one']
+ sample: ["abcd", "milestone-one"]
untouched:
description: A list of milestones which exist.
returned: always
type: list
- sample: ['defg', 'new-milestone']
+ sample: ["defg", "new-milestone"]
removed:
description: A list of milestones which were deleted.
returned: always
type: list
- sample: ['defg', 'new-milestone']
+ sample: ["defg", "new-milestone"]
updated:
description: A list pre-existing milestones whose values have been set.
returned: always
type: list
- sample: ['defg', 'new-milestone']
+ sample: ["defg", "new-milestone"]
milestones_obj:
description: API object.
returned: success
@@ -411,15 +411,15 @@ def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(auth_argument_spec())
argument_spec.update(
- project=dict(type='str', required=False, default=None),
- group=dict(type='str', required=False, default=None),
- purge=dict(type='bool', required=False, default=False),
- milestones=dict(type='list', elements='dict', required=False, default=list(),
+ project=dict(type='str'),
+ group=dict(type='str'),
+ purge=dict(type='bool', default=False),
+ milestones=dict(type='list', elements='dict', default=list(),
options=dict(
title=dict(type='str', required=True),
- description=dict(type='str', required=False),
- due_date=dict(type='str', required=False),
- start_date=dict(type='str', required=False),)
+ description=dict(type='str'),
+ due_date=dict(type='str'),
+ start_date=dict(type='str'),)
),
state=dict(type='str', default="present", choices=["absent", "present"]),
)
diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py
index eec2b0fa7a..1e2140e24a 100644
--- a/plugins/modules/gitlab_project.py
+++ b/plugins/modules/gitlab_project.py
@@ -13,9 +13,9 @@ DOCUMENTATION = r"""
module: gitlab_project
short_description: Creates/updates/deletes GitLab Projects
description:
- - When the project does not exist in GitLab, it will be created.
- - When the project does exist and O(state=absent), the project will be deleted.
- - When changes are made to the project, the project will be updated.
+ - When the project does not exist in GitLab, it is created.
+ - When the project does exist and O(state=absent), the project is deleted.
+ - When changes are made to the project, the project is updated.
author:
- Werner Dijkerman (@dj-wasabi)
- Guillaume Martinez (@Lunik)
@@ -47,7 +47,7 @@ options:
build_timeout:
description:
- Maximum number of seconds a CI job can run.
- - If not specified on creation, GitLab will impose a default value.
+ - If not specified on creation, GitLab imposes a default value.
type: int
version_added: "10.6.0"
builds_access_level:
@@ -148,7 +148,7 @@ options:
type: str
import_url:
description:
- - Git repository which will be imported into gitlab.
+ - Git repository which is imported into GitLab.
- GitLab server needs read access to this git repository.
required: false
type: str
@@ -162,7 +162,7 @@ options:
version_added: "6.4.0"
initialize_with_readme:
description:
- - Will initialize the project with a default C(README.md).
+ - Initializes the project with a default C(README.md).
- Is only used when the project is created, and ignored otherwise.
type: bool
default: false
@@ -248,8 +248,8 @@ options:
version_added: "9.3.0"
path:
description:
- - The path of the project you want to create, this will be server_url//path.
- - If not supplied, name will be used.
+ - The path of the project you want to create; this is server_url/O(group)/O(path).
+ - If not supplied, O(name) is used.
type: str
releases_access_level:
description:
@@ -600,7 +600,7 @@ def main():
builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
build_timeout=dict(type='int'),
ci_config_path=dict(type='str'),
- container_expiration_policy=dict(type='dict', default=None, options=dict(
+ container_expiration_policy=dict(type='dict', options=dict(
cadence=dict(type='str', choices=["1d", "7d", "14d", "1month", "3month"]),
enabled=dict(type='bool'),
keep_n=dict(type='int', choices=[0, 1, 5, 10, 25, 50, 100]),
diff --git a/plugins/modules/gitlab_project_access_token.py b/plugins/modules/gitlab_project_access_token.py
index a93d5531bf..3747870d9a 100644
--- a/plugins/modules/gitlab_project_access_token.py
+++ b/plugins/modules/gitlab_project_access_token.py
@@ -28,7 +28,7 @@ extends_documentation_fragment:
- community.general.attributes
notes:
- Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. Whether tokens
- will be recreated is controlled by the O(recreate) option, which defaults to V(never).
+ are recreated is controlled by the O(recreate) option, which defaults to V(never).
- Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards.
- Token matching is done by comparing O(name) option.
attributes:
@@ -55,8 +55,16 @@ options:
type: list
elements: str
aliases: ["scope"]
- choices: ["api", "read_api", "read_registry", "write_registry", "read_repository", "write_repository", "create_runner",
- "ai_features", "k8s_proxy"]
+ choices:
+ - api
+ - read_api
+ - read_registry
+ - write_registry
+ - read_repository
+ - write_repository
+ - create_runner
+ - ai_features
+ - k8s_proxy
access_level:
description:
- Access level of the access token.
@@ -71,17 +79,17 @@ options:
required: true
recreate:
description:
- - Whether the access token will be recreated if it already exists.
- - When V(never) the token will never be recreated.
- - When V(always) the token will always be recreated.
- - When V(state_change) the token will be recreated if there is a difference between desired state and actual state.
+ - Whether the access token is recreated if it already exists.
+ - When V(never) the token is never recreated.
+ - When V(always) the token is always recreated.
+ - When V(state_change) the token is recreated if there is a difference between desired state and actual state.
type: str
choices: ["never", "always", "state_change"]
default: never
state:
description:
- - When V(present) the access token will be added to the project if it does not exist.
- - When V(absent) it will be removed from the project if it exists.
+ - When V(present) the access token is added to the project if it does not exist.
+ - When V(absent) it is removed from the project if it exists.
default: present
type: str
choices: ["present", "absent"]
@@ -183,9 +191,9 @@ class GitLabProjectAccessToken(object):
@param name of the access token
'''
def find_access_token(self, project, name):
- access_tokens = project.access_tokens.list(all=True)
+ access_tokens = [x for x in project.access_tokens.list(all=True) if not getattr(x, 'revoked', False)]
for access_token in access_tokens:
- if (access_token.name == name):
+ if access_token.name == name:
self.access_token_object = access_token
return False
return False
@@ -235,7 +243,7 @@ def main():
'create_runner',
'ai_features',
'k8s_proxy']),
- access_level=dict(type='str', required=False, default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
+ access_level=dict(type='str', default='maintainer', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
expires_at=dict(type='str', required=True),
recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change'])
))
diff --git a/plugins/modules/gitlab_project_variable.py b/plugins/modules/gitlab_project_variable.py
index 5903c9b5c4..4c261f5978 100644
--- a/plugins/modules/gitlab_project_variable.py
+++ b/plugins/modules/gitlab_project_variable.py
@@ -11,10 +11,10 @@ DOCUMENTATION = r"""
module: gitlab_project_variable
short_description: Creates/updates/deletes GitLab Projects Variables
description:
- - When a project variable does not exist, it will be created.
- - When a project variable does exist, its value will be updated when the values are different.
+ - When a project variable does not exist, it is created.
+ - When a project variable does exist, its value is updated when the values are different.
- Variables which are untouched in the playbook, but are not untouched in the GitLab project, they stay untouched (O(purge=false))
- or will be deleted (O(purge=true)).
+ or are deleted (O(purge=true)).
author:
- "Markus Bergholz (@markuman)"
requirements:
@@ -45,12 +45,12 @@ options:
type: str
purge:
description:
- - When set to true, all variables which are not untouched in the task will be deleted.
+ - When set to V(true), all variables which are not untouched in the task are deleted.
default: false
type: bool
vars:
description:
- - When the list element is a simple key-value pair, masked, raw and protected will be set to false.
+ - When the list element is a simple key-value pair, masked, raw and protected are set to V(false).
- When the list element is a dict with the keys C(value), C(masked), C(raw) and C(protected), the user can have full
control about whether a value should be masked, raw, protected or both.
- Support for protected values requires GitLab >= 9.3.
@@ -202,22 +202,22 @@ project_variable:
description: A list of variables which were created.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
untouched:
description: A list of variables which exist.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
removed:
description: A list of variables which were deleted.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
updated:
description: A list of variables whose values were changed.
returned: always
type: list
- sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"]
"""
from ansible.module_utils.basic import AnsibleModule
@@ -394,11 +394,11 @@ def main():
argument_spec.update(auth_argument_spec())
argument_spec.update(
project=dict(type='str', required=True),
- purge=dict(type='bool', required=False, default=False),
- vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ purge=dict(type='bool', default=False),
+ vars=dict(type='dict', default=dict(), no_log=True),
# please mind whenever changing the variables dict to also change module_utils/gitlab.py's
# KNOWN dict in filter_returned_variables or bad evil will happen
- variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
+ variables=dict(type='list', elements='dict', default=list(), options=dict(
name=dict(type='str', required=True),
value=dict(type='str', no_log=True),
masked=dict(type='bool', default=False),
diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py
index 62875c552a..87ba152ffa 100644
--- a/plugins/modules/gitlab_runner.py
+++ b/plugins/modules/gitlab_runner.py
@@ -81,8 +81,8 @@ options:
description:
- The registration token is used to register new runners before GitLab 16.0.
- Required if O(state=present) for GitLab < 16.0.
- - If set, the runner will be created using the old runner creation workflow.
- - If not set, the runner will be created using the new runner creation workflow, introduced in GitLab 16.0.
+ - If set, the runner is created using the old runner creation workflow.
+ - If not set, the runner is created using the new runner creation workflow, introduced in GitLab 16.0.
- If not set, requires python-gitlab >= 4.0.0.
type: str
owned:
@@ -122,8 +122,8 @@ options:
- If set to V(not_protected), runner can pick up jobs from both protected and unprotected branches.
- If set to V(ref_protected), runner can pick up jobs only from protected branches.
- Before community.general 8.0.0 the default was V(ref_protected). This was changed to no default in community.general
- 8.0.0. If this option is not specified explicitly, GitLab will use V(not_protected) on creation, and the value set
- will not be changed on any updates.
+ 8.0.0. If this option is not specified explicitly, GitLab uses V(not_protected) on creation, and the value set is
+ not changed on any updates.
required: false
choices: ["not_protected", "ref_protected"]
type: str
@@ -393,10 +393,10 @@ class GitLabRunner(object):
# python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner
# object, so we need to handle both
if hasattr(runner, "description"):
- if (runner.description == description):
+ if runner.description == description:
return self._gitlab.runners.get(runner.id)
else:
- if (runner['description'] == description):
+ if runner['description'] == description:
return self._gitlab.runners.get(runner['id'])
'''
diff --git a/plugins/modules/gitlab_user.py b/plugins/modules/gitlab_user.py
index 3be684b1e9..4d7bd506f6 100644
--- a/plugins/modules/gitlab_user.py
+++ b/plugins/modules/gitlab_user.py
@@ -14,10 +14,10 @@ DOCUMENTATION = r"""
module: gitlab_user
short_description: Creates/updates/deletes/blocks/unblocks GitLab Users
description:
- - When the user does not exist in GitLab, it will be created.
- - When the user exists and state=absent, the user will be deleted.
- - When the user exists and state=blocked, the user will be blocked.
- - When changes are made to user, the user will be updated.
+ - When the user does not exist in GitLab, it is created.
+ - When the user exists and state=absent, the user is deleted.
+ - When the user exists and state=blocked, the user is blocked.
+ - When changes are made to the user, the user is updated.
notes:
- From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user.
author:
diff --git a/plugins/modules/grove.py b/plugins/modules/grove.py
index abdc303f90..81417657c8 100644
--- a/plugins/modules/grove.py
+++ b/plugins/modules/grove.py
@@ -51,8 +51,8 @@ options:
required: false
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
default: true
type: bool
author: "Jonas Pfenniger (@zimbatm)"
@@ -101,8 +101,8 @@ def main():
channel_token=dict(type='str', required=True, no_log=True),
message_content=dict(type='str', required=True),
service=dict(type='str', default='ansible'),
- url=dict(type='str', default=None),
- icon_url=dict(type='str', default=None),
+ url=dict(type='str'),
+ icon_url=dict(type='str'),
validate_certs=dict(default=True, type='bool'),
)
)
diff --git a/plugins/modules/gunicorn.py b/plugins/modules/gunicorn.py
index 8118e0f60d..b524165c90 100644
--- a/plugins/modules/gunicorn.py
+++ b/plugins/modules/gunicorn.py
@@ -50,8 +50,8 @@ options:
pid:
type: path
description:
- - A filename to use for the PID file. If not set and not found on the configuration file a tmp pid file will be created
- to check a successful run of gunicorn.
+ - A filename to use for the PID file. If not set and not found on the configuration file a tmp pid file is created to
+ check a successful run of gunicorn.
worker:
type: str
choices: ['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp']
@@ -62,8 +62,8 @@ options:
description:
- Switch worker processes to run as this user.
notes:
- - If not specified on config file, a temporary error log will be created on /tmp dir. Please make sure you have write access
- in /tmp dir. Not needed but will help you to identify any problem with configuration.
+ - If not specified in the config file, a temporary error log is created in the C(/tmp) directory. Please make sure you have
+ write access to the C(/tmp) directory. Not needed but it is helpful to identify any problem with configuration.
"""
EXAMPLES = r"""
diff --git a/plugins/modules/haproxy.py b/plugins/modules/haproxy.py
index 9c60e59040..b0e56de061 100644
--- a/plugins/modules/haproxy.py
+++ b/plugins/modules/haproxy.py
@@ -32,7 +32,7 @@ options:
backend:
description:
- Name of the HAProxy backend pool.
- - If this parameter is unset, it will be auto-detected.
+ - If this parameter is unset, it is auto-detected.
type: str
drain:
description:
@@ -62,8 +62,7 @@ options:
state:
description:
- Desired state of the provided backend host.
- - Note that V(drain) state is supported only by HAProxy version 1.5 or later. When used on versions < 1.5, it will be
- ignored.
+ - Note that V(drain) state is supported only by HAProxy version 1.5 or later. When used on versions < 1.5, it is ignored.
type: str
required: true
choices: [disabled, drain, enabled]
@@ -103,7 +102,7 @@ options:
weight:
description:
- The value passed in argument.
- - If the value ends with the V(%) sign, then the new weight will be relative to the initially configured weight.
+ - If the value ends with the V(%) sign, then the new weight is relative to the initially configured weight.
- Relative weights are only permitted between 0 and 100% and absolute weights are permitted between 0 and 256.
type: str
"""
diff --git a/plugins/modules/hg.py b/plugins/modules/hg.py
index f269628abb..982364504c 100644
--- a/plugins/modules/hg.py
+++ b/plugins/modules/hg.py
@@ -61,15 +61,15 @@ options:
default: true
executable:
description:
- - Path to hg executable to use. If not supplied, the normal mechanism for resolving binary paths will be used.
+ - Path to C(hg) executable to use. If not supplied, the normal mechanism for resolving binary paths is used.
type: str
notes:
- This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156).
- - 'If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the
- first contact with a remote host. To avoid this prompt, one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts)
+ - 'If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH prompts user to authorize the first
+ contact with a remote host. To avoid this prompt, one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts)
before calling the hg module, with the following command: C(ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts).'
- As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, if the underlying system
- still uses a Python version below 2.7.9, you will have issues checking out bitbucket repositories. See
+ still uses a Python version below 2.7.9, you are bound to have issues checking out bitbucket repositories. See
U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01).
"""
@@ -222,12 +222,12 @@ def main():
argument_spec=dict(
repo=dict(type='str', required=True, aliases=['name']),
dest=dict(type='path'),
- revision=dict(type='str', default=None, aliases=['version']),
+ revision=dict(type='str', aliases=['version']),
force=dict(type='bool', default=False),
purge=dict(type='bool', default=False),
update=dict(type='bool', default=True),
clone=dict(type='bool', default=True),
- executable=dict(type='str', default=None),
+ executable=dict(type='str'),
),
)
repo = module.params['repo']
diff --git a/plugins/modules/hipchat.py b/plugins/modules/hipchat.py
deleted file mode 100644
index 14b8bb2cb4..0000000000
--- a/plugins/modules/hipchat.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: hipchat
-short_description: Send a message to Hipchat
-description:
- - Send a message to a Hipchat room, with options to control the formatting.
-extends_documentation_fragment:
- - community.general.attributes
-deprecated:
- removed_in: 11.0.0
- why: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020.
- alternative: There is none.
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- token:
- type: str
- description:
- - API token.
- required: true
- room:
- type: str
- description:
- - ID or name of the room.
- required: true
- msg_from:
- type: str
- description:
- - Name the message will appear to be sent from. Max length is 15 characters - above this it will be truncated.
- default: Ansible
- aliases: [from]
- msg:
- type: str
- description:
- - The message body.
- required: true
- color:
- type: str
- description:
- - Background color for the message.
- default: yellow
- choices: ["yellow", "red", "green", "purple", "gray", "random"]
- msg_format:
- type: str
- description:
- - Message format.
- default: text
- choices: ["text", "html"]
- notify:
- description:
- - If true, a notification will be triggered for users in the room.
- type: bool
- default: true
- validate_certs:
- description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
- type: bool
- default: true
- api:
- type: str
- description:
- - API URL if using a self-hosted hipchat server. For Hipchat API version 2 use the default URI with C(/v2) instead of
- C(/v1).
- default: 'https://api.hipchat.com/v1'
-
-author:
- - Shirou Wakayama (@shirou)
- - Paul Bourdel (@pb8226)
-"""
-
-EXAMPLES = r"""
-- name: Send a message to a Hipchat room
- community.general.hipchat:
- room: notif
- msg: Ansible task finished
-
-- name: Send a message to a Hipchat room using Hipchat API version 2
- community.general.hipchat:
- api: https://api.hipchat.com/v2/
- token: OAUTH2_TOKEN
- room: notify
- msg: Ansible task finished
-"""
-
-# ===========================================
-# HipChat module specific support methods.
-#
-
-import json
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.six.moves.urllib.request import pathname2url
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.urls import fetch_url
-
-
-DEFAULT_URI = "https://api.hipchat.com/v1"
-
-MSG_URI_V1 = "/rooms/message"
-
-NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
-
-
-def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
- color='yellow', notify=False, api=MSG_URI_V1):
- '''sending message to hipchat v1 server'''
-
- params = {}
- params['room_id'] = room
- params['from'] = msg_from[:15] # max length is 15
- params['message'] = msg
- params['message_format'] = msg_format
- params['color'] = color
- params['api'] = api
- params['notify'] = int(notify)
-
- url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
- data = urlencode(params)
-
- if module.check_mode:
- # In check mode, exit before actually sending the message
- module.exit_json(changed=False)
-
- response, info = fetch_url(module, url, data=data)
- if info['status'] == 200:
- return response.read()
- else:
- module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
-
-
-def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
- color='yellow', notify=False, api=NOTIFY_URI_V2):
- '''sending message to hipchat v2 server'''
-
- headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
-
- body = dict()
- body['message'] = msg
- body['color'] = color
- body['message_format'] = msg_format
- body['notify'] = notify
-
- POST_URL = api + NOTIFY_URI_V2
-
- url = POST_URL.replace('{id_or_name}', pathname2url(room))
- data = json.dumps(body)
-
- if module.check_mode:
- # In check mode, exit before actually sending the message
- module.exit_json(changed=False)
-
- response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
-
- # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows
- # 204 to be the expected result code.
- if info['status'] in [200, 204]:
- return response.read()
- else:
- module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
-
-
-# ===========================================
-# Module execution.
-#
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- token=dict(required=True, no_log=True),
- room=dict(required=True),
- msg=dict(required=True),
- msg_from=dict(default="Ansible", aliases=['from']),
- color=dict(default="yellow", choices=["yellow", "red", "green",
- "purple", "gray", "random"]),
- msg_format=dict(default="text", choices=["text", "html"]),
- notify=dict(default=True, type='bool'),
- validate_certs=dict(default=True, type='bool'),
- api=dict(default=DEFAULT_URI),
- ),
- supports_check_mode=True
- )
-
- token = module.params["token"]
- room = str(module.params["room"])
- msg = module.params["msg"]
- msg_from = module.params["msg_from"]
- color = module.params["color"]
- msg_format = module.params["msg_format"]
- notify = module.params["notify"]
- api = module.params["api"]
-
- try:
- if api.find('/v2') != -1:
- send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
- else:
- send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
- except Exception as e:
- module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
-
- changed = True
- module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py
index 25d4fc37e2..021f990e67 100644
--- a/plugins/modules/homebrew.py
+++ b/plugins/modules/homebrew.py
@@ -83,7 +83,7 @@ options:
default: false
version_added: 9.0.0
notes:
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly
+ - When used with C(loop:), each package is processed individually; it is much more efficient to pass the list directly
to the O(name) option.
"""
@@ -173,7 +173,7 @@ changed_pkgs:
- List of package names which are changed after module run.
returned: success
type: list
- sample: ['git', 'git-cola']
+ sample: ["git", "git-cola"]
version_added: '0.2.0'
"""
@@ -807,13 +807,11 @@ def main():
argument_spec=dict(
name=dict(
aliases=["pkg", "package", "formula"],
- required=False,
type='list',
elements='str',
),
path=dict(
default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin",
- required=False,
type='path',
),
state=dict(
@@ -835,13 +833,11 @@ def main():
type='bool',
),
install_options=dict(
- default=None,
aliases=['options'],
type='list',
elements='str',
),
upgrade_options=dict(
- default=None,
type='list',
elements='str',
),
diff --git a/plugins/modules/homebrew_cask.py b/plugins/modules/homebrew_cask.py
index 7455a61d69..948f5c1fd1 100644
--- a/plugins/modules/homebrew_cask.py
+++ b/plugins/modules/homebrew_cask.py
@@ -734,13 +734,11 @@ def main():
argument_spec=dict(
name=dict(
aliases=["pkg", "package", "cask"],
- required=False,
type='list',
elements='str',
),
path=dict(
default="/usr/local/bin:/opt/homebrew/bin",
- required=False,
type='path',
),
state=dict(
@@ -753,7 +751,6 @@ def main():
),
sudo_password=dict(
type="str",
- required=False,
no_log=True,
),
update_homebrew=dict(
@@ -761,7 +758,6 @@ def main():
type='bool',
),
install_options=dict(
- default=None,
aliases=['options'],
type='list',
elements='str',
diff --git a/plugins/modules/homebrew_tap.py b/plugins/modules/homebrew_tap.py
index f070ccccc7..f50472f90d 100644
--- a/plugins/modules/homebrew_tap.py
+++ b/plugins/modules/homebrew_tap.py
@@ -220,11 +220,10 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=['tap'], type='list', required=True, elements='str'),
- url=dict(default=None, required=False),
+ url=dict(),
state=dict(default='present', choices=['present', 'absent']),
path=dict(
default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin",
- required=False,
type='path',
),
),
diff --git a/plugins/modules/homectl.py b/plugins/modules/homectl.py
index 72f1882dec..f93867d03b 100644
--- a/plugins/modules/homectl.py
+++ b/plugins/modules/homectl.py
@@ -65,7 +65,7 @@ options:
type: str
resize:
description:
- - When used with O(disksize) this will attempt to resize the home directory immediately.
+ - When used with O(disksize) this attempts to resize the home directory immediately.
default: false
type: bool
realname:
@@ -218,53 +218,54 @@ EXAMPLES = r"""
RETURN = r"""
data:
- description: Dictionary returned from C(homectl inspect -j).
- returned: success
- type: dict
- sample: {
- "data": {
- "binding": {
- "e9ed2a5b0033427286b228e97c1e8343": {
- "fileSystemType": "btrfs",
- "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b",
- "gid": 60268,
- "imagePath": "/home/james.home",
- "luksCipher": "aes",
- "luksCipherMode": "xts-plain64",
- "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81",
- "luksVolumeKeySize": 32,
- "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f",
- "storage": "luks",
- "uid": 60268
- }
- },
+ description: Dictionary returned from C(homectl inspect -j).
+ returned: success
+ type: dict
+ sample:
+ {
+ "data": {
+ "binding": {
+ "e9ed2a5b0033427286b228e97c1e8343": {
+ "fileSystemType": "btrfs",
+ "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b",
+ "gid": 60268,
+ "imagePath": "/home/james.home",
+ "luksCipher": "aes",
+ "luksCipherMode": "xts-plain64",
+ "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81",
+ "luksVolumeKeySize": 32,
+ "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f",
+ "storage": "luks",
+ "uid": 60268
+ }
+ },
+ "diskSize": 3221225472,
+ "disposition": "regular",
+ "lastChangeUSec": 1641941238208691,
+ "lastPasswordChangeUSec": 1641941238208691,
+ "privileged": {
+ "hashedPassword": [
+ "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV."
+ ]
+ },
+ "signature": [
+ {
+ "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==",
+ "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n"
+ }
+ ],
+ "status": {
+ "e9ed2a5b0033427286b228e97c1e8343": {
+ "diskCeiling": 21845405696,
+ "diskFloor": 268435456,
"diskSize": 3221225472,
- "disposition": "regular",
- "lastChangeUSec": 1641941238208691,
- "lastPasswordChangeUSec": 1641941238208691,
- "privileged": {
- "hashedPassword": [
- "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV."
- ]
- },
- "signature": [
- {
- "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==",
- "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n"
- }
- ],
- "status": {
- "e9ed2a5b0033427286b228e97c1e8343": {
- "diskCeiling": 21845405696,
- "diskFloor": 268435456,
- "diskSize": 3221225472,
- "service": "io.systemd.Home",
- "signedLocally": true,
- "state": "inactive"
- }
- },
- "userName": "james",
- }
+ "service": "io.systemd.Home",
+ "signedLocally": true,
+ "state": "inactive"
+ }
+ },
+ "userName": "james"
+ }
}
"""
diff --git a/plugins/modules/honeybadger_deployment.py b/plugins/modules/honeybadger_deployment.py
index b303313f70..2512fc2642 100644
--- a/plugins/modules/honeybadger_deployment.py
+++ b/plugins/modules/honeybadger_deployment.py
@@ -51,7 +51,7 @@ options:
default: "https://api.honeybadger.io/v1/deploys"
validate_certs:
description:
- - If V(false), SSL certificates for the target URL will not be validated. This should only be used on personally controlled
+ - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled
sites using self-signed certificates.
type: bool
default: true
@@ -67,7 +67,7 @@ EXAMPLES = r"""
repo: 'git@github.com:user/repo.git'
"""
-RETURN = """# """
+RETURN = """#"""
import traceback
@@ -88,9 +88,9 @@ def main():
token=dict(required=True, no_log=True),
environment=dict(required=True),
user=dict(required=False),
- repo=dict(required=False),
- revision=dict(required=False),
- url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
+ repo=dict(),
+ revision=dict(),
+ url=dict(default='https://api.honeybadger.io/v1/deploys'),
validate_certs=dict(default=True, type='bool'),
),
supports_check_mode=True
diff --git a/plugins/modules/hpilo_boot.py b/plugins/modules/hpilo_boot.py
index c3d14564d6..f04aaaed20 100644
--- a/plugins/modules/hpilo_boot.py
+++ b/plugins/modules/hpilo_boot.py
@@ -67,7 +67,7 @@ options:
force:
description:
- Whether to force a reboot (even when the system is already booted).
- - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
+ - As a safeguard, without force, M(community.general.hpilo_boot) refuses to reboot a server that is already running.
default: false
type: bool
ssl_version:
@@ -78,7 +78,7 @@ options:
choices: ["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"]
idempotent_boot_once:
description:
- - "This option makes O(state=boot_once) succeed instead of failing when the server is already powered on."
+ - This option makes O(state=boot_once) succeed instead of failing when the server is already powered on.
type: bool
default: false
version_added: 10.6.0
diff --git a/plugins/modules/hpilo_info.py b/plugins/modules/hpilo_info.py
index 70eecb8b0e..90680603e8 100644
--- a/plugins/modules/hpilo_info.py
+++ b/plugins/modules/hpilo_info.py
@@ -121,7 +121,7 @@ hw_uuid:
host_power_status:
description:
- Power status of host.
- - Will be one of V(ON), V(OFF) and V(UNKNOWN).
+ - It is one of V(ON), V(OFF), or V(UNKNOWN).
returned: always
type: str
sample: "ON"
diff --git a/plugins/modules/hponcfg.py b/plugins/modules/hponcfg.py
index 654ba2c710..c2d32c7d89 100644
--- a/plugins/modules/hponcfg.py
+++ b/plugins/modules/hponcfg.py
@@ -97,7 +97,6 @@ class HPOnCfg(ModuleHelper):
verbose=cmd_runner_fmt.as_bool("-v"),
minfw=cmd_runner_fmt.as_opt_val("-m"),
)
- use_old_vardict = False
def __run__(self):
runner = CmdRunner(
diff --git a/plugins/modules/htpasswd.py b/plugins/modules/htpasswd.py
index f13cd5a610..d8a755476f 100644
--- a/plugins/modules/htpasswd.py
+++ b/plugins/modules/htpasswd.py
@@ -46,10 +46,13 @@ options:
description:
- Hashing scheme to be used. As well as the four choices listed here, you can also use any other hash supported by passlib,
such as V(portable_apache22) and V(host_apache24); or V(md5_crypt) and V(sha256_crypt), which are Linux passwd hashes.
- Only some schemes in addition to the four choices below will be compatible with Apache or Nginx, and supported schemes
- depend on passlib version and its dependencies.
+ Only some schemes in addition to the four choices below are compatible with Apache or Nginx, and supported schemes
+ depend on C(passlib) version and its dependencies.
- See U(https://passlib.readthedocs.io/en/stable/lib/passlib.apache.html#passlib.apache.HtpasswdFile) parameter C(default_scheme).
- 'Some of the available choices might be: V(apr_md5_crypt), V(des_crypt), V(ldap_sha1), V(plaintext).'
+ - 'B(WARNING): The module has no mechanism to determine the O(hash_scheme) of an existing entry, therefore, it does
+ not detect whether the O(hash_scheme) has changed. If you want to change the scheme, you must remove the existing
+ entry and then create a new one using the new scheme.'
aliases: [crypt_scheme]
state:
type: str
@@ -63,8 +66,8 @@ options:
type: bool
default: true
description:
- - Used with O(state=present). If V(true), the file will be created if it does not exist. Conversely, if set to V(false)
- and the file does not exist it will fail.
+ - Used with O(state=present). If V(true), the file is created if it does not exist. Conversely, if set to V(false) and
+ the file does not exist, it fails.
notes:
- This module depends on the C(passlib) Python library, which needs to be installed on all target systems.
- 'On Debian < 11, Ubuntu <= 20.04, or Fedora: install C(python-passlib).'
@@ -188,9 +191,9 @@ def main():
arg_spec = dict(
path=dict(type='path', required=True, aliases=["dest", "destfile"]),
name=dict(type='str', required=True, aliases=["username"]),
- password=dict(type='str', required=False, default=None, no_log=True),
- hash_scheme=dict(type='str', required=False, default="apr_md5_crypt", aliases=["crypt_scheme"]),
- state=dict(type='str', required=False, default="present", choices=["present", "absent"]),
+ password=dict(type='str', no_log=True),
+ hash_scheme=dict(type='str', default="apr_md5_crypt", aliases=["crypt_scheme"]),
+ state=dict(type='str', default="present", choices=["present", "absent"]),
create=dict(type='bool', default=True),
)
@@ -238,8 +241,8 @@ def main():
(msg, changed) = present(path, username, password, hash_scheme, create, check_mode)
elif state == 'absent':
if not os.path.exists(path):
- module.exit_json(msg="%s not present" % username,
- warnings="%s does not exist" % path, changed=False)
+ module.warn("%s does not exist" % path)
+ module.exit_json(msg="%s not present" % username, changed=False)
(msg, changed) = absent(path, username, check_mode)
else:
module.fail_json(msg="Invalid state: %s" % state)
diff --git a/plugins/modules/hwc_ecs_instance.py b/plugins/modules/hwc_ecs_instance.py
index f01b7c48fd..13becdf07f 100644
--- a/plugins/modules/hwc_ecs_instance.py
+++ b/plugins/modules/hwc_ecs_instance.py
@@ -442,7 +442,7 @@ created:
disk_config_type:
description:
- Specifies the disk configuration type. MANUAL is The image space is not expanded. AUTO is the image space of the system
- disk will be expanded to be as same as the flavor.
+ disk is expanded to be as same as the flavor.
type: str
returned: success
host_name:
diff --git a/plugins/modules/hwc_evs_disk.py b/plugins/modules/hwc_evs_disk.py
index 0763c07b01..0d57caf6cb 100644
--- a/plugins/modules/hwc_evs_disk.py
+++ b/plugins/modules/hwc_evs_disk.py
@@ -70,8 +70,8 @@ options:
- SSD specifies the ultra-high I/O disk type.
- SAS specifies the high I/O disk type.
- SATA specifies the common I/O disk type.
- - If the specified disk type is not available in the AZ, the disk will fail to create. If the EVS disk is created from
- a snapshot, the volume_type field must be the same as that of the snapshot's source disk.
+ - If the specified disk type is not available in the AZ, the disk creation fails. If the EVS disk is created from a
+ snapshot, the volume_type field must be the same as that of the snapshot's source disk.
type: str
required: true
backup_id:
@@ -92,9 +92,9 @@ options:
required: false
enable_scsi:
description:
- - If this parameter is set to True, the disk device type will be SCSI, which allows ECS OSs to directly access underlying
- storage media. SCSI reservation command is supported. If this parameter is set to False, the disk device type will
- be VBD, which supports only simple SCSI read/write commands.
+ - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying
+ storage media. SCSI reservation command is supported. If this parameter is set to V(false), the disk device type is
+ VBD, which supports only simple SCSI read/write commands.
- If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI
EVS disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified.
type: bool
@@ -167,8 +167,8 @@ volume_type:
- SSD specifies the ultra-high I/O disk type.
- SAS specifies the high I/O disk type.
- SATA specifies the common I/O disk type.
- - If the specified disk type is not available in the AZ, the disk will fail to create. If the EVS disk is created from
- a snapshot, the volume_type field must be the same as that of the snapshot's source disk.
+ - If the specified disk type is not available in the AZ, the disk creation fails. If the EVS disk is created from a snapshot,
+ the volume_type field must be the same as that of the snapshot's source disk.
type: str
returned: success
backup_id:
@@ -189,8 +189,8 @@ enable_full_clone:
returned: success
enable_scsi:
description:
- - If this parameter is set to True, the disk device type will be SCSI, which allows ECS OSs to directly access underlying
- storage media. SCSI reservation command is supported. If this parameter is set to False, the disk device type will be
+ - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying
+ storage media. SCSI reservation command is supported. If this parameter is set to V(false), the disk device type is
VBD, which supports only simple SCSI read/write commands.
- If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI EVS
disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified.
diff --git a/plugins/modules/hwc_network_vpc.py b/plugins/modules/hwc_network_vpc.py
index 3342280061..d34e428d6a 100644
--- a/plugins/modules/hwc_network_vpc.py
+++ b/plugins/modules/hwc_network_vpc.py
@@ -117,7 +117,7 @@ routes:
returned: success
next_hop:
description:
- - The next hop of a route. If the route type is peering, it will provide VPC peering connection ID.
+ - The next hop of a route. If the route type is peering, it provides VPC peering connection ID.
type: str
returned: success
enable_shared_snat:
diff --git a/plugins/modules/hwc_vpc_eip.py b/plugins/modules/hwc_vpc_eip.py
index b818fe0d86..e830c2b14b 100644
--- a/plugins/modules/hwc_vpc_eip.py
+++ b/plugins/modules/hwc_vpc_eip.py
@@ -92,7 +92,7 @@ options:
required: false
ip_version:
description:
- - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address will be assigned.
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned.
type: int
required: false
ipv4_address:
@@ -193,7 +193,7 @@ enterprise_project_id:
returned: success
ip_version:
description:
- - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address will be assigned.
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned.
type: int
returned: success
ipv4_address:
diff --git a/plugins/modules/hwc_vpc_private_ip.py b/plugins/modules/hwc_vpc_private_ip.py
index 695c644cb9..e665568774 100644
--- a/plugins/modules/hwc_vpc_private_ip.py
+++ b/plugins/modules/hwc_vpc_private_ip.py
@@ -21,7 +21,7 @@ notes:
- If O(id) option is provided, it takes precedence over O(subnet_id), O(ip_address) for private IP selection.
- O(subnet_id), O(ip_address) are used for private IP selection. If more than one private IP with this options exists, execution
is aborted.
- - No parameter support updating. If one of option is changed, the module will create a new resource.
+ - No parameter support updating. If one of the options is changed, the module creates a new resource.
version_added: '0.2.0'
author: Huawei Inc. (@huaweicloud)
requirements:
diff --git a/plugins/modules/hwc_vpc_route.py b/plugins/modules/hwc_vpc_route.py
index 85224fd4c8..20bbba6cd8 100644
--- a/plugins/modules/hwc_vpc_route.py
+++ b/plugins/modules/hwc_vpc_route.py
@@ -21,7 +21,7 @@ notes:
- If O(id) option is provided, it takes precedence over O(destination), O(vpc_id), O(type), and O(next_hop) for route selection.
- O(destination), O(vpc_id), O(type) and O(next_hop) are used for route selection. If more than one route with this options
exists, execution is aborted.
- - No parameter support updating. If one of option is changed, the module will create a new resource.
+ - No parameter support updating. If one of the options is changed, the module creates a new resource.
version_added: '0.2.0'
author: Huawei Inc. (@huaweicloud)
requirements:
diff --git a/plugins/modules/hwc_vpc_security_group.py b/plugins/modules/hwc_vpc_security_group.py
index 9f53b49c0d..e1b2b41ae4 100644
--- a/plugins/modules/hwc_vpc_security_group.py
+++ b/plugins/modules/hwc_vpc_security_group.py
@@ -22,7 +22,7 @@ notes:
selection.
- O(name), O(enterprise_project_id) and O(vpc_id) are used for security group selection. If more than one security group
with this options exists, execution is aborted.
- - No parameter support updating. If one of option is changed, the module will create a new resource.
+ - No parameter support updating. If one of the options is changed, the module creates a new resource.
version_added: '0.2.0'
author: Huawei Inc. (@huaweicloud)
requirements:
diff --git a/plugins/modules/hwc_vpc_security_group_rule.py b/plugins/modules/hwc_vpc_security_group_rule.py
index 0848901cd5..42f854a029 100644
--- a/plugins/modules/hwc_vpc_security_group_rule.py
+++ b/plugins/modules/hwc_vpc_security_group_rule.py
@@ -21,7 +21,7 @@ notes:
- If O(id) option is provided, it takes precedence over O(security_group_id) for security group rule selection.
- O(security_group_id) is used for security group rule selection. If more than one security group rule with this options
exists, execution is aborted.
- - No parameter support updating. If one of option is changed, the module will create a new resource.
+ - No parameter support updating. If one of the options is changed, the module creates a new resource.
version_added: '0.2.0'
author: Huawei Inc. (@huaweicloud)
requirements:
diff --git a/plugins/modules/hwc_vpc_subnet.py b/plugins/modules/hwc_vpc_subnet.py
index 84a9219370..b9af890688 100644
--- a/plugins/modules/hwc_vpc_subnet.py
+++ b/plugins/modules/hwc_vpc_subnet.py
@@ -86,7 +86,7 @@ options:
required: false
dns_address:
description:
- - Specifies the DNS server addresses for subnet. The address in the head will be used first.
+ - Specifies the DNS server addresses for subnet. The address in the head is used first.
type: list
elements: str
required: false
@@ -148,7 +148,7 @@ dhcp_enable:
returned: success
dns_address:
description:
- - Specifies the DNS server addresses for subnet. The address in the head will be used first.
+ - Specifies the DNS server addresses for subnet. The address in the head is used first.
type: list
returned: success
"""
diff --git a/plugins/modules/ibm_sa_host.py b/plugins/modules/ibm_sa_host.py
index f6613b3b29..b3d80a6b62 100644
--- a/plugins/modules/ibm_sa_host.py
+++ b/plugins/modules/ibm_sa_host.py
@@ -41,8 +41,8 @@ options:
type: str
domain:
description:
- - The domains the cluster will be attached to. To include more than one domain, separate domain names with commas. To
- include all existing domains, use an asterisk (V(*)).
+ - The domains the cluster is attached to. To include more than one domain, separate domain names with commas. To include
+ all existing domains, use an asterisk (V(*)).
required: false
type: str
iscsi_chap_name:
diff --git a/plugins/modules/icinga2_host.py b/plugins/modules/icinga2_host.py
index 8d0a3b554b..d78f607aae 100644
--- a/plugins/modules/icinga2_host.py
+++ b/plugins/modules/icinga2_host.py
@@ -30,13 +30,13 @@ options:
- HTTP, HTTPS, or FTP URL in the form V((http|https|ftp\)://[user[:pass]]@host.domain[:port]/path).
use_proxy:
description:
- - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
+ - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts.
type: bool
default: true
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
url_username:
@@ -48,12 +48,12 @@ options:
type: str
description:
- The password for use in HTTP basic authentication.
- - If the O(url_username) parameter is not specified, the O(url_password) parameter will not be used.
+ - If the O(url_username) parameter is not specified, the O(url_password) parameter is not used.
force_basic_auth:
description:
- - C(httplib2), the library used by Ansible's HTTP request code only sends authentication information when a webservice responds to
- an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins will fail.
- This option forces the sending of the Basic authentication header upon initial request.
+ - C(httplib2), the library used by Ansible's HTTP request code only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins
+ may fail. This option forces the sending of the Basic authentication header upon initial request.
type: bool
default: false
client_cert:
@@ -235,11 +235,11 @@ def main():
state=dict(default="present", choices=["absent", "present"]),
name=dict(required=True, aliases=['host']),
zone=dict(),
- template=dict(default=None),
+ template=dict(),
check_command=dict(default="hostalive"),
- display_name=dict(default=None),
+ display_name=dict(),
ip=dict(),
- variables=dict(type='dict', default=None),
+ variables=dict(type='dict'),
)
# Define the main module
diff --git a/plugins/modules/imc_rest.py b/plugins/modules/imc_rest.py
index 8a0b63cd78..674ba0d2b3 100644
--- a/plugins/modules/imc_rest.py
+++ b/plugins/modules/imc_rest.py
@@ -57,8 +57,8 @@ options:
description:
- When used instead of O(path), sets the content of the API requests directly.
- This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module.
- - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream, the Cisco IMC
- output is subsequently merged.
+ - You can collate multiple IMC XML fragments and they are processed sequentially in a single stream, the Cisco IMC output
+ is subsequently merged.
- Parameter O(content) is mutual exclusive with parameter O(path).
type: str
protocol:
@@ -71,12 +71,12 @@ options:
description:
- The socket level timeout in seconds.
- This is the time that every single connection (every fragment) can spend. If this O(timeout) is reached, the module
- will fail with a C(Connection failure) indicating that C(The read operation timed out).
+ fails with a C(Connection failure) indicating that C(The read operation timed out).
default: 60
type: int
validate_certs:
description:
- - If V(false), SSL certificates will not be validated.
+ - If V(false), SSL certificates are not validated.
- This should only set to V(false) used on personally controlled sites using self-signed certificates.
type: bool
default: true
@@ -84,8 +84,8 @@ notes:
- The XML fragments do not need an authentication cookie, this is injected by the module automatically.
- The Cisco IMC XML output is being translated to JSON using the Cobra convention.
- Any configConfMo change requested has a return status of C(modified), even if there was no actual change from the previous
- configuration. As a result, this module will always report a change on subsequent runs. In case this behaviour is fixed
- in a future update to Cisco IMC, this module will automatically adapt.
+ configuration. As a result, this module always reports a change on subsequent runs. In case this behaviour is fixed in
+ a future update to Cisco IMC, this module is meant to automatically adapt.
- If you get a C(Connection failure) related to C(The read operation timed out) increase the O(timeout) parameter. Some
XML fragments can take longer than the default timeout.
- More information about the IMC REST API is available from
diff --git a/plugins/modules/imgadm.py b/plugins/modules/imgadm.py
index 344bf9cc56..1c29e8a94b 100644
--- a/plugins/modules/imgadm.py
+++ b/plugins/modules/imgadm.py
@@ -44,7 +44,7 @@ options:
choices: [present, absent, deleted, imported, updated, vacuumed]
description:
- State the object operated on should be in. V(imported) is an alias for for V(present) and V(deleted) for V(absent).
- When set to V(vacuumed) and O(uuid=*), it will remove all unused images.
+ When set to V(vacuumed) and O(uuid=*), it removes all unused images.
type: str
type:
diff --git a/plugins/modules/infinity.py b/plugins/modules/infinity.py
index 3bcb5aceda..cc54b46c51 100644
--- a/plugins/modules/infinity.py
+++ b/plugins/modules/infinity.py
@@ -126,7 +126,7 @@ network_info:
"network_size": null,
"description": null,
"network_location": "3085",
- "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null},
+ "ranges": {"id": 0, "name": null, "first_ip": null, "type": null, "last_ip": null},
"network_type": "lan",
"network_name": "'reserve_new_ansible_network'"
}
diff --git a/plugins/modules/influxdb_user.py b/plugins/modules/influxdb_user.py
index bc66ff693d..45410e76a5 100644
--- a/plugins/modules/influxdb_user.py
+++ b/plugins/modules/influxdb_user.py
@@ -37,7 +37,7 @@ options:
admin:
description:
- Whether the user should be in the admin role or not.
- - Since version 2.8, the role will also be updated.
+ - Since version 2.8, the role is also updated.
default: false
type: bool
state:
@@ -50,8 +50,8 @@ options:
description:
- Privileges to grant to this user.
- Takes a list of dicts containing the "database" and "privilege" keys.
- - If this argument is not provided, the current grants will be left alone.
- - If an empty list is provided, all grants for the user will be removed.
+ - If this argument is not provided, the current grants are left alone.
+ - If an empty list is provided, all grants for the user are removed.
type: list
elements: dict
extends_documentation_fragment:
@@ -101,9 +101,7 @@ EXAMPLES = r"""
state: absent
"""
-RETURN = r"""
-#only defaults
-"""
+RETURN = r"""#"""
import json
@@ -219,7 +217,7 @@ def main():
argument_spec.update(
state=dict(default='present', type='str', choices=['present', 'absent']),
user_name=dict(required=True, type='str'),
- user_password=dict(required=False, type='str', no_log=True),
+ user_password=dict(type='str', no_log=True),
admin=dict(default='False', type='bool'),
grants=dict(type='list', elements='dict'),
)
diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py
index 61e6662d95..04fe92fa08 100644
--- a/plugins/modules/ini_file.py
+++ b/plugins/modules/ini_file.py
@@ -39,7 +39,7 @@ options:
section:
description:
- Section name in INI file. This is added if O(state=present) automatically when a single value is being set.
- - If being omitted, the O(option) will be placed before the first O(section).
+ - If being omitted, the O(option) is placed before the first O(section).
- Omitting O(section) is also required if the config format does not support sections.
type: str
section_has_values:
@@ -63,7 +63,7 @@ options:
elements: str
description:
- Among possibly multiple sections of the same name, select the first one that contains matching options and values.
- - With O(state=present), if a suitable section is not found, a new section will be added, including the required options.
+ - With O(state=present), if a suitable section is not found, a new section is added, including the required options.
- With O(state=absent), at most one O(section) is removed if it contains the values.
version_added: 8.6.0
option:
@@ -100,8 +100,8 @@ options:
O(option)s with the same name are not touched.
- If set to V(present) and O(exclusive) set to V(false) the specified O(option=values) lines are added, but the other
O(option)s with the same name are not touched.
- - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines will be added and the other
- O(option)s with the same name are removed.
+ - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines are added and the other O(option)s
+ with the same name are removed.
type: str
choices: [absent, present]
default: present
@@ -126,8 +126,8 @@ options:
version_added: 7.5.0
create:
description:
- - If set to V(false), the module will fail if the file does not already exist.
- - By default it will create the file if it is missing.
+ - If set to V(false), the module fails if the file does not already exist.
+ - By default it creates the file if it is missing.
type: bool
default: true
allow_no_value:
@@ -268,21 +268,21 @@ from ansible.module_utils.common.text.converters import to_bytes, to_text
def match_opt(option, line):
option = re.escape(option)
- return re.match('([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
+ return re.match('( |\t)*([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
def match_active_opt(option, line):
option = re.escape(option)
- return re.match('()( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
+ return re.match('()()( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
def update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg):
option_changed = None
if ignore_spaces:
old_match = match_opt(option, section_lines[index])
- if not old_match.group(1):
+ if not old_match.group(2):
new_match = match_opt(option, newline)
- option_changed = old_match.group(7) != new_match.group(7)
+ option_changed = old_match.group(8) != new_match.group(8)
if option_changed is None:
option_changed = section_lines[index] != newline
if option_changed:
@@ -299,7 +299,7 @@ def check_section_has_values(section_has_values, section_lines):
for condition in section_has_values:
for line in section_lines:
match = match_opt(condition["option"], line)
- if match and (len(condition["values"]) == 0 or match.group(7) in condition["values"]):
+ if match and (len(condition["values"]) == 0 or match.group(8) in condition["values"]):
break
else:
return False
@@ -432,8 +432,8 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None,
for index, line in enumerate(section_lines):
if match_function(option, line):
match = match_function(option, line)
- if values and match.group(7) in values:
- matched_value = match.group(7)
+ if values and match.group(8) in values:
+ matched_value = match.group(8)
if not matched_value and allow_no_value:
# replace existing option with no value line(s)
newline = u'%s\n' % option
@@ -505,7 +505,7 @@ def do_ini(module, filename, section=None, section_has_values=None, option=None,
section_lines = new_section_lines
elif not exclusive and len(values) > 0:
# delete specified option=value line(s)
- new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(7) in values)]
+ new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(8) in values)]
if section_lines != new_section_lines:
changed = True
msg = 'option changed'
@@ -584,7 +584,7 @@ def main():
option=dict(type='str', required=True),
value=dict(type='str'),
values=dict(type='list', elements='str')
- ), default=None, mutually_exclusive=[['value', 'values']]),
+ ), mutually_exclusive=[['value', 'values']]),
option=dict(type='str'),
value=dict(type='str'),
values=dict(type='list', elements='str'),
diff --git a/plugins/modules/installp.py b/plugins/modules/installp.py
index e54a56949f..da88a7e7c2 100644
--- a/plugins/modules/installp.py
+++ b/plugins/modules/installp.py
@@ -47,7 +47,7 @@ options:
choices: [absent, present]
default: present
notes:
- - If the package is already installed, even the package/fileset is new, the module will not install it.
+ - If the package is already installed, even if the package/fileset is new, the module does not install it.
"""
EXAMPLES = r"""
diff --git a/plugins/modules/interfaces_file.py b/plugins/modules/interfaces_file.py
index 23bfd78790..8e315d7b69 100644
--- a/plugins/modules/interfaces_file.py
+++ b/plugins/modules/interfaces_file.py
@@ -45,10 +45,10 @@ options:
value:
type: str
description:
- - If O(option) is not presented for the O(iface) and O(state) is V(present) option will be added. If O(option) already
- exists and is not V(pre-up), V(up), V(post-up) or V(down), its value will be updated. V(pre-up), V(up), V(post-up)
- and V(down) options cannot be updated, only adding new options, removing existing ones or cleaning the whole option
- set are supported.
+ - If O(option) is not presented for the O(iface) and O(state) is V(present), then O(option) is added. If O(option) already
+ exists and is not V(pre-up), V(up), V(post-up) or V(down), its value is updated. V(pre-up), V(up), V(post-up) and
+ V(down) options cannot be updated, only adding new options, removing existing ones or cleaning the whole option set
+ are supported.
backup:
description:
- Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered
@@ -58,12 +58,12 @@ options:
state:
type: str
description:
- - If set to V(absent) the option or section will be removed if present instead of created.
+ - If set to V(absent) the option or section is removed if present instead of created.
default: "present"
choices: ["present", "absent"]
notes:
- - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state.
+ - If option is defined multiple times, the last one is updated but all others are deleted in case of O(state=absent).
requirements: []
author: "Roman Belyakovsky (@hryamzik)"
"""
diff --git a/plugins/modules/ipa_dnsrecord.py b/plugins/modules/ipa_dnsrecord.py
index d92e2c4f66..3cba35b11c 100644
--- a/plugins/modules/ipa_dnsrecord.py
+++ b/plugins/modules/ipa_dnsrecord.py
@@ -45,33 +45,31 @@ options:
- Manage DNS record name with this value.
- Mutually exclusive with O(record_values), and exactly one of O(record_value) and O(record_values) has to be specified.
- Use O(record_values) if you need to specify multiple values.
- - In the case of V(A) or V(AAAA) record types, this will be the IP address.
- - In the case of V(A6) record type, this will be the A6 Record data.
- - In the case of V(CNAME) record type, this will be the hostname.
- - In the case of V(DNAME) record type, this will be the DNAME target.
- - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA
- record.
- - In the case of V(PTR) record type, this will be the hostname.
- - In the case of V(TXT) record type, this will be a text.
- - In the case of V(SRV) record type, this will be a service record.
- - In the case of V(MX) record type, this will be a mail exchanger record.
- - In the case of V(SSHFP) record type, this will be an SSH fingerprint record.
+ - In the case of V(A) or V(AAAA) record types, this is the IP address.
+ - In the case of V(A6) record type, this is the A6 Record data.
+ - In the case of V(CNAME) record type, this is the hostname.
+ - In the case of V(DNAME) record type, this is the DNAME target.
+ - In the case of V(NS) record type, this is the name server hostname. Hostname must already have a valid A or AAAA record.
+ - In the case of V(PTR) record type, this is the hostname.
+ - In the case of V(TXT) record type, this is a text.
+ - In the case of V(SRV) record type, this is a service record.
+ - In the case of V(MX) record type, this is a mail exchanger record.
+ - In the case of V(SSHFP) record type, this is an SSH fingerprint record.
type: str
record_values:
description:
- Manage DNS record name with this value.
- Mutually exclusive with O(record_value), and exactly one of O(record_value) and O(record_values) has to be specified.
- - In the case of V(A) or V(AAAA) record types, this will be the IP address.
- - In the case of V(A6) record type, this will be the A6 Record data.
- - In the case of V(CNAME) record type, this will be the hostname.
- - In the case of V(DNAME) record type, this will be the DNAME target.
- - In the case of V(NS) record type, this will be the name server hostname. Hostname must already have a valid A or AAAA
- record.
- - In the case of V(PTR) record type, this will be the hostname.
- - In the case of V(TXT) record type, this will be a text.
- - In the case of V(SRV) record type, this will be a service record.
- - In the case of V(MX) record type, this will be a mail exchanger record.
- - In the case of V(SSHFP) record type, this will be an SSH fingerprint record.
+ - In the case of V(A) or V(AAAA) record types, this is the IP address.
+ - In the case of V(A6) record type, this is the A6 Record data.
+ - In the case of V(CNAME) record type, this is the hostname.
+ - In the case of V(DNAME) record type, this is the DNAME target.
+ - In the case of V(NS) record type, this is the name server hostname. Hostname must already have a valid A or AAAA record.
+ - In the case of V(PTR) record type, this is the hostname.
+ - In the case of V(TXT) record type, this is a text.
+ - In the case of V(SRV) record type, this is a service record.
+ - In the case of V(MX) record type, this is a mail exchanger record.
+ - In the case of V(SSHFP) record type, this is an SSH fingerprint record.
type: list
elements: str
record_ttl:
@@ -167,7 +165,7 @@ EXAMPLES = r"""
state: absent
- name: Ensure an NS record for a subdomain is present
- community,general.ipa_dnsrecord:
+ community.general.ipa_dnsrecord:
name: subdomain
zone_name: example.com
record_type: 'NS'
@@ -355,7 +353,7 @@ def main():
record_value=dict(type='str'),
record_values=dict(type='list', elements='str'),
state=dict(type='str', default='present', choices=['present', 'absent']),
- record_ttl=dict(type='int', required=False),
+ record_ttl=dict(type='int'),
)
module = AnsibleModule(
diff --git a/plugins/modules/ipa_dnszone.py b/plugins/modules/ipa_dnszone.py
index b536c258d2..81a99bc54b 100644
--- a/plugins/modules/ipa_dnszone.py
+++ b/plugins/modules/ipa_dnszone.py
@@ -173,8 +173,8 @@ def main():
argument_spec = ipa_argument_spec()
argument_spec.update(zone_name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
- dynamicupdate=dict(type='bool', required=False, default=False),
- allowsyncptr=dict(type='bool', required=False, default=False),
+ dynamicupdate=dict(type='bool', default=False),
+ allowsyncptr=dict(type='bool', default=False),
)
module = AnsibleModule(argument_spec=argument_spec,
diff --git a/plugins/modules/ipa_getkeytab.py b/plugins/modules/ipa_getkeytab.py
index dfd612564b..7c533fb729 100644
--- a/plugins/modules/ipa_getkeytab.py
+++ b/plugins/modules/ipa_getkeytab.py
@@ -67,15 +67,15 @@ options:
retrieve_mode:
description:
- Retrieve an existing key from the server instead of generating a new one.
- - This is incompatible with the O(password), and will work only against a IPA server more recent than version 3.3.
+ - This is incompatible with the O(password), and works only against a IPA server more recent than version 3.3.
- The user requesting the keytab must have access to the keys for this operation to succeed.
- - Be aware that if set V(true), a new keytab will be generated.
+ - Be aware that if set V(true), a new keytab is generated.
- This invalidates all previously retrieved keytabs for this service principal.
type: bool
encryption_types:
description:
- The list of encryption types to use to generate keys.
- - It will use local client defaults if not provided.
+ - It uses local client defaults if not provided.
- Valid values depend on the Kerberos library version and configuration.
type: str
state:
diff --git a/plugins/modules/ipa_group.py b/plugins/modules/ipa_group.py
index 60077a2c6a..934e533dff 100644
--- a/plugins/modules/ipa_group.py
+++ b/plugins/modules/ipa_group.py
@@ -22,7 +22,7 @@ options:
append:
description:
- If V(true), add the listed O(user) and O(group) to the group members.
- - If V(false), only the listed O(user) and O(group) will be group members, removing any other members.
+ - If V(false), only the listed O(user) and O(group) are set as group members, removing any other members.
default: false
type: bool
version_added: 4.0.0
@@ -49,10 +49,10 @@ options:
group:
description:
- List of group names assigned to this group.
- - If O(append=false) and an empty list is passed all groups will be removed from this group.
- - Groups that are already assigned but not passed will be removed.
- - If O(append=true) the listed groups will be assigned without removing other groups.
- - If option is omitted assigned groups will not be checked or changed.
+ - If O(append=false) and an empty list is passed all groups are removed from this group.
+ - Groups that are already assigned but not passed are removed.
+ - If O(append=true) the listed groups are assigned without removing other groups.
+ - If option is omitted assigned groups are not checked or changed.
type: list
elements: str
nonposix:
@@ -62,10 +62,10 @@ options:
user:
description:
- List of user names assigned to this group.
- - If O(append=false) and an empty list is passed all users will be removed from this group.
- - Users that are already assigned but not passed will be removed.
- - If O(append=true) the listed users will be assigned without removing other users.
- - If option is omitted assigned users will not be checked or changed.
+ - If O(append=false) and an empty list is passed all users are removed from this group.
+ - Users that are already assigned but not passed are removed.
+ - If O(append=true) the listed users are assigned without removing other users.
+ - If option is omitted assigned users are not checked or changed.
type: list
elements: str
external_user:
@@ -73,7 +73,7 @@ options:
- List of external users assigned to this group.
- Behaves identically to O(user) with respect to O(append) attribute.
- List entries can be in V(DOMAIN\\\\username) or SID format.
- - Unless SIDs are provided, the module will always attempt to make changes even if the group already has all the users.
+ - Unless SIDs are provided, the module always attempts to make changes even if the group already has all the users.
This is because only SIDs are returned by IPA query.
- O(external=true) is needed for this option to work.
type: list
@@ -261,7 +261,7 @@ def ensure(module, client):
nonposix=module.params['nonposix'])
ipa_group = client.group_find(name=name)
- if (not (external or external_user is None)):
+ if not (external or external_user is None):
module.fail_json("external_user can only be set if external = True")
changed = False
diff --git a/plugins/modules/ipa_hbacrule.py b/plugins/modules/ipa_hbacrule.py
index d168a3a7e0..cb828f68e9 100644
--- a/plugins/modules/ipa_hbacrule.py
+++ b/plugins/modules/ipa_hbacrule.py
@@ -32,8 +32,8 @@ options:
host:
description:
- List of host names to assign.
- - If an empty list is passed all hosts will be removed from the rule.
- - If option is omitted hosts will not be checked or changed.
+ - If an empty list is passed all hosts are removed from the rule.
+ - If option is omitted hosts are not checked or changed.
required: false
type: list
elements: str
@@ -44,15 +44,15 @@ options:
hostgroup:
description:
- List of hostgroup names to assign.
- - If an empty list is passed all hostgroups will be removed from the rule.
- - If option is omitted hostgroups will not be checked or changed.
+ - If an empty list is passed all hostgroups are removed from the rule.
+ - If option is omitted hostgroups are not checked or changed.
type: list
elements: str
service:
description:
- List of service names to assign.
- - If an empty list is passed all services will be removed from the rule.
- - If option is omitted services will not be checked or changed.
+ - If an empty list is passed all services are removed from the rule.
+ - If option is omitted services are not checked or changed.
type: list
elements: str
servicecategory:
@@ -62,15 +62,15 @@ options:
servicegroup:
description:
- List of service group names to assign.
- - If an empty list is passed all assigned service groups will be removed from the rule.
- - If option is omitted service groups will not be checked or changed.
+ - If an empty list is passed all assigned service groups are removed from the rule.
+ - If option is omitted service groups are not checked or changed.
type: list
elements: str
sourcehost:
description:
- List of source host names to assign.
- - If an empty list if passed all assigned source hosts will be removed from the rule.
- - If option is omitted source hosts will not be checked or changed.
+ - If an empty list is passed all assigned source hosts are removed from the rule.
+ - If option is omitted source hosts are not checked or changed.
type: list
elements: str
sourcehostcategory:
@@ -80,8 +80,8 @@ options:
sourcehostgroup:
description:
- List of source host group names to assign.
- - If an empty list if passed all assigned source host groups will be removed from the rule.
- - If option is omitted source host groups will not be checked or changed.
+ - If an empty list is passed all assigned source host groups are removed from the rule.
+ - If option is omitted source host groups are not checked or changed.
type: list
elements: str
state:
@@ -92,8 +92,8 @@ options:
user:
description:
- List of user names to assign.
- - If an empty list if passed all assigned users will be removed from the rule.
- - If option is omitted users will not be checked or changed.
+ - If an empty list is passed all assigned users are removed from the rule.
+ - If option is omitted users are not checked or changed.
type: list
elements: str
usercategory:
@@ -103,8 +103,8 @@ options:
usergroup:
description:
- List of user group names to assign.
- - If an empty list if passed all assigned user groups will be removed from the rule.
- - If option is omitted user groups will not be checked or changed.
+ - If an empty list is passed all assigned user groups are removed from the rule.
+ - If option is omitted user groups are not checked or changed.
type: list
elements: str
extends_documentation_fragment:
diff --git a/plugins/modules/ipa_host.py b/plugins/modules/ipa_host.py
index a78ea6223e..c88f3c0adb 100644
--- a/plugins/modules/ipa_host.py
+++ b/plugins/modules/ipa_host.py
@@ -42,9 +42,9 @@ options:
mac_address:
description:
- List of Hardware MAC address(es) off this host.
- - If option is omitted MAC addresses will not be checked or changed.
- - If an empty list is passed all assigned MAC addresses will be removed.
- - MAC addresses that are already assigned but not passed will be removed.
+ - If option is omitted MAC addresses are not checked or changed.
+ - If an empty list is passed all assigned MAC addresses are removed.
+ - MAC addresses that are already assigned but not passed are removed.
aliases: ["macaddress"]
type: list
elements: str
@@ -66,9 +66,9 @@ options:
user_certificate:
description:
- List of Base-64 encoded server certificates.
- - If option is omitted certificates will not be checked or changed.
- - If an empty list is passed all assigned certificates will be removed.
- - Certificates already assigned but not passed will be removed.
+ - If option is omitted certificates are not checked or changed.
+ - If an empty list is passed all assigned certificates are removed.
+ - Certificates already assigned but not passed are removed.
aliases: ["usercertificate"]
type: list
elements: str
diff --git a/plugins/modules/ipa_hostgroup.py b/plugins/modules/ipa_hostgroup.py
index c1e7d3ad56..ffe87fca4c 100644
--- a/plugins/modules/ipa_hostgroup.py
+++ b/plugins/modules/ipa_hostgroup.py
@@ -22,7 +22,7 @@ options:
append:
description:
- If V(true), add the listed O(host) to the O(hostgroup).
- - If V(false), only the listed O(host) will be in O(hostgroup), removing any other hosts.
+ - If V(false), only the listed O(host) is set in O(hostgroup), removing any other hosts.
default: false
type: bool
version_added: 6.6.0
@@ -40,17 +40,17 @@ options:
host:
description:
- List of hosts that belong to the host-group.
- - If an empty list is passed all hosts will be removed from the group.
- - If option is omitted hosts will not be checked or changed.
- - If option is passed all assigned hosts that are not passed will be unassigned from the group.
+ - If an empty list is passed all hosts are removed from the group.
+ - If option is omitted hosts are not checked or changed.
+ - If option is passed all assigned hosts that are not passed are unassigned from the group.
type: list
elements: str
hostgroup:
description:
- List of host-groups than belong to that host-group.
- - If an empty list is passed all host-groups will be removed from the group.
- - If option is omitted host-groups will not be checked or changed.
- - If option is passed all assigned hostgroups that are not passed will be unassigned from the group.
+ - If an empty list is passed all host-groups are removed from the group.
+ - If option is omitted host-groups are not checked or changed.
+ - If option is passed all assigned hostgroups that are not passed are unassigned from the group.
type: list
elements: str
state:
diff --git a/plugins/modules/ipa_otptoken.py b/plugins/modules/ipa_otptoken.py
index 5f5c8dd612..e8c99bd302 100644
--- a/plugins/modules/ipa_otptoken.py
+++ b/plugins/modules/ipa_otptoken.py
@@ -26,7 +26,7 @@ options:
aliases: ["name"]
type: str
newuniqueid:
- description: If specified, the unique ID specified will be changed to this.
+ description: If specified, the unique ID specified is changed to this.
type: str
otptype:
description:
@@ -37,7 +37,7 @@ options:
secretkey:
description:
- Token secret (Base64).
- - If OTP is created and this is not specified, a random secret will be generated by IPA.
+ - If OTP is created and this is not specified, a random secret is generated by IPA.
- B(Note:) Cannot be modified after OTP is created.
type: str
description:
@@ -54,13 +54,13 @@ options:
description:
- First date/time the token can be used.
- In the format C(YYYYMMddHHmmss).
- - For example, V(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22.
+ - For example, V(20180121182022) allows the token to be used starting on 21 January 2018 at 18:20:22.
type: str
notafter:
description:
- Last date/time the token can be used.
- In the format C(YYYYMMddHHmmss).
- - For example, V(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22.
+ - For example, V(20200121182022) allows the token to be used until 21 January 2020 at 18:20:22.
type: str
vendor:
description: Token vendor name (informational only).
@@ -84,7 +84,7 @@ options:
type: str
digits:
description:
- - Number of digits each token code will have.
+ - Number of digits each token code has.
- B(Note:) Cannot be modified after OTP is created.
choices: [6, 8]
type: int
diff --git a/plugins/modules/ipa_role.py b/plugins/modules/ipa_role.py
index e77b732cb2..6057deec7b 100644
--- a/plugins/modules/ipa_role.py
+++ b/plugins/modules/ipa_role.py
@@ -33,41 +33,41 @@ options:
group:
description:
- List of group names assign to this role.
- - If an empty list is passed all assigned groups will be unassigned from the role.
- - If option is omitted groups will not be checked or changed.
- - If option is passed all assigned groups that are not passed will be unassigned from the role.
+ - If an empty list is passed all assigned groups are unassigned from the role.
+ - If option is omitted groups are not checked or changed.
+ - If option is passed all assigned groups that are not passed are unassigned from the role.
type: list
elements: str
host:
description:
- List of host names to assign.
- - If an empty list is passed all assigned hosts will be unassigned from the role.
- - If option is omitted hosts will not be checked or changed.
- - If option is passed all assigned hosts that are not passed will be unassigned from the role.
+ - If an empty list is passed all assigned hosts are unassigned from the role.
+ - If option is omitted hosts are not checked or changed.
+ - If option is passed all assigned hosts that are not passed are unassigned from the role.
type: list
elements: str
hostgroup:
description:
- List of host group names to assign.
- - If an empty list is passed all assigned host groups will be removed from the role.
- - If option is omitted host groups will not be checked or changed.
- - If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
+ - If an empty list is passed all assigned host groups are removed from the role.
+ - If option is omitted host groups are not checked or changed.
+ - If option is passed all assigned hostgroups that are not passed are unassigned from the role.
type: list
elements: str
privilege:
description:
- List of privileges granted to the role.
- - If an empty list is passed all assigned privileges will be removed.
- - If option is omitted privileges will not be checked or changed.
- - If option is passed all assigned privileges that are not passed will be removed.
+ - If an empty list is passed all assigned privileges are removed.
+ - If option is omitted privileges are not checked or changed.
+ - If option is passed all assigned privileges that are not passed are removed.
type: list
elements: str
service:
description:
- List of service names to assign.
- - If an empty list is passed all assigned services will be removed from the role.
- - If option is omitted services will not be checked or changed.
- - If option is passed all assigned services that are not passed will be removed from the role.
+ - If an empty list is passed all assigned services are removed from the role.
+ - If option is omitted services are not checked or changed.
+ - If option is passed all assigned services that are not passed are removed from the role.
type: list
elements: str
state:
@@ -78,8 +78,8 @@ options:
user:
description:
- List of user names to assign.
- - If an empty list is passed all assigned users will be removed from the role.
- - If option is omitted users will not be checked or changed.
+ - If an empty list is passed all assigned users are removed from the role.
+ - If option is omitted users are not checked nor changed.
type: list
elements: str
extends_documentation_fragment:
diff --git a/plugins/modules/ipa_service.py b/plugins/modules/ipa_service.py
index 54c5575950..51ace78760 100644
--- a/plugins/modules/ipa_service.py
+++ b/plugins/modules/ipa_service.py
@@ -197,10 +197,10 @@ def main():
argument_spec = ipa_argument_spec()
argument_spec.update(
krbcanonicalname=dict(type='str', required=True, aliases=['name']),
- force=dict(type='bool', required=False),
- skip_host_check=dict(type='bool', default=False, required=False),
- hosts=dict(type='list', required=False, elements='str'),
- state=dict(type='str', required=False, default='present',
+ force=dict(type='bool'),
+ skip_host_check=dict(type='bool', default=False),
+ hosts=dict(type='list', elements='str'),
+ state=dict(type='str', default='present',
choices=['present', 'absent']))
module = AnsibleModule(argument_spec=argument_spec,
diff --git a/plugins/modules/ipa_sudocmdgroup.py b/plugins/modules/ipa_sudocmdgroup.py
index c7ab798f4c..4298032121 100644
--- a/plugins/modules/ipa_sudocmdgroup.py
+++ b/plugins/modules/ipa_sudocmdgroup.py
@@ -37,8 +37,8 @@ options:
sudocmd:
description:
- List of sudo commands to assign to the group.
- - If an empty list is passed all assigned commands will be removed from the group.
- - If option is omitted sudo commands will not be checked or changed.
+ - If an empty list is passed all assigned commands are removed from the group.
+ - If option is omitted sudo commands are not checked nor changed.
type: list
elements: str
extends_documentation_fragment:
diff --git a/plugins/modules/ipa_sudorule.py b/plugins/modules/ipa_sudorule.py
index 1670a52035..ae3730da62 100644
--- a/plugins/modules/ipa_sudorule.py
+++ b/plugins/modules/ipa_sudorule.py
@@ -34,31 +34,31 @@ options:
cmd:
description:
- List of commands assigned to the rule.
- - If an empty list is passed all commands will be removed from the rule.
- - If option is omitted commands will not be checked or changed.
+ - If an empty list is passed all commands are removed from the rule.
+ - If option is omitted commands are not checked nor changed.
type: list
elements: str
cmdgroup:
description:
- List of command groups assigned to the rule.
- - If an empty list is passed all command groups will be removed from the rule.
- - If option is omitted command groups will not be checked or changed.
+ - If an empty list is passed all command groups are removed from the rule.
+ - If option is omitted command groups are not checked nor changed.
type: list
elements: str
version_added: 2.0.0
deny_cmd:
description:
- List of denied commands assigned to the rule.
- - If an empty list is passed all commands will be removed from the rule.
- - If option is omitted commands will not be checked or changed.
+ - If an empty list is passed all commands are removed from the rule.
+ - If option is omitted commands are not checked nor changed.
type: list
elements: str
version_added: 8.1.0
deny_cmdgroup:
description:
- List of denied command groups assigned to the rule.
- - If an empty list is passed all command groups will be removed from the rule.
- - If option is omitted command groups will not be checked or changed.
+ - If an empty list is passed all command groups are removed from the rule.
+ - If option is omitted command groups are not checked nor changed.
type: list
elements: str
version_added: 8.1.0
@@ -69,8 +69,8 @@ options:
host:
description:
- List of hosts assigned to the rule.
- - If an empty list is passed all hosts will be removed from the rule.
- - If option is omitted hosts will not be checked or changed.
+ - If an empty list is passed all hosts are removed from the rule.
+ - If option is omitted hosts are not checked nor changed.
- Option O(hostcategory) must be omitted to assign hosts.
type: list
elements: str
@@ -84,8 +84,8 @@ options:
hostgroup:
description:
- List of host groups assigned to the rule.
- - If an empty list is passed all host groups will be removed from the rule.
- - If option is omitted host groups will not be checked or changed.
+ - If an empty list is passed all host groups are removed from the rule.
+ - If option is omitted host groups are not checked nor changed.
- Option O(hostcategory) must be omitted to assign host groups.
type: list
elements: str
@@ -113,8 +113,8 @@ options:
user:
description:
- List of users assigned to the rule.
- - If an empty list is passed all users will be removed from the rule.
- - If option is omitted users will not be checked or changed.
+ - If an empty list is passed all users are removed from the rule.
+ - If option is omitted users are not checked nor changed.
type: list
elements: str
usercategory:
@@ -125,8 +125,8 @@ options:
usergroup:
description:
- List of user groups assigned to the rule.
- - If an empty list is passed all user groups will be removed from the rule.
- - If option is omitted user groups will not be checked or changed.
+ - If an empty list is passed all user groups are removed from the rule.
+ - If option is omitted user groups are not checked nor changed.
type: list
elements: str
state:
diff --git a/plugins/modules/ipa_user.py b/plugins/modules/ipa_user.py
index 47d50972bd..6e61f89600 100644
--- a/plugins/modules/ipa_user.py
+++ b/plugins/modules/ipa_user.py
@@ -35,9 +35,9 @@ options:
type: str
krbpasswordexpiration:
description:
- - Date at which the user password will expire.
+ - Date at which the user password expires.
- In the format YYYYMMddHHmmss.
- - For example V(20180121182022) will expire on 21 January 2018 at 18:20:22.
+ - For example V(20180121182022) expires on 21 January 2018 at 18:20:22.
type: str
loginshell:
description: Login shell.
@@ -45,14 +45,14 @@ options:
mail:
description:
- List of mail addresses assigned to the user.
- - If an empty list is passed all assigned email addresses will be deleted.
- - If None is passed email addresses will not be checked or changed.
+ - If an empty list is passed all assigned email addresses are deleted.
+ - If None is passed email addresses are not checked nor changed.
type: list
elements: str
password:
description:
- Password for a user.
- - Will not be set for an existing user unless O(update_password=always), which is the default.
+ - It is not set for an existing user unless O(update_password=always), which is the default.
type: str
sn:
description:
@@ -62,8 +62,8 @@ options:
sshpubkey:
description:
- List of public SSH key.
- - If an empty list is passed all assigned public keys will be deleted.
- - If None is passed SSH public keys will not be checked or changed.
+ - If an empty list is passed all assigned public keys are deleted.
+ - If None is passed SSH public keys are not checked nor changed.
type: list
elements: str
state:
@@ -74,8 +74,8 @@ options:
telephonenumber:
description:
- List of telephone numbers assigned to the user.
- - If an empty list is passed all assigned telephone numbers will be deleted.
- - If None is passed telephone numbers will not be checked or changed.
+ - If an empty list is passed all assigned telephone numbers are deleted.
+ - If None is passed telephone numbers are not checked nor changed.
type: list
elements: str
title:
diff --git a/plugins/modules/ipbase_info.py b/plugins/modules/ipbase_info.py
index 3c7d3d26c1..7a2dde13d6 100644
--- a/plugins/modules/ipbase_info.py
+++ b/plugins/modules/ipbase_info.py
@@ -21,7 +21,7 @@ extends_documentation_fragment:
options:
ip:
description:
- - The IP you want to get the info for. If not specified the API will detect the IP automatically.
+ - The IP you want to get the info for. If not specified the API detects the IP automatically.
required: false
type: str
apikey:
@@ -31,7 +31,7 @@ options:
type: str
hostname:
description:
- - If the O(hostname) parameter is set to V(true), the API response will contain the hostname of the IP.
+ - If the O(hostname) parameter is set to V(true), the API response contains the hostname of the IP.
required: false
type: bool
default: false
@@ -71,147 +71,148 @@ data:
of the response."
returned: success
type: dict
- sample: {
- "ip": "1.1.1.1",
- "hostname": "one.one.one.one",
- "type": "v4",
- "range_type": {
- "type": "PUBLIC",
- "description": "Public address"
- },
- "connection": {
- "asn": 13335,
- "organization": "Cloudflare, Inc.",
- "isp": "APNIC Research and Development",
- "range": "1.1.1.1/32"
- },
- "location": {
- "geonames_id": 5332870,
- "latitude": 34.053611755371094,
- "longitude": -118.24549865722656,
- "zip": "90012",
- "continent": {
- "code": "NA",
- "name": "North America",
- "name_translated": "North America"
+ sample:
+ {
+ "ip": "1.1.1.1",
+ "hostname": "one.one.one.one",
+ "type": "v4",
+ "range_type": {
+ "type": "PUBLIC",
+ "description": "Public address"
},
- "country": {
- "alpha2": "US",
- "alpha3": "USA",
- "calling_codes": [
- "+1"
- ],
- "currencies": [
- {
- "symbol": "$",
- "name": "US Dollar",
- "symbol_native": "$",
- "decimal_digits": 2,
- "rounding": 0,
- "code": "USD",
- "name_plural": "US dollars"
- }
- ],
- "emoji": "...",
- "ioc": "USA",
- "languages": [
- {
- "name": "English",
- "name_native": "English"
- }
- ],
- "name": "United States",
- "name_translated": "United States",
- "timezones": [
- "America/New_York",
- "America/Detroit",
- "America/Kentucky/Louisville",
- "America/Kentucky/Monticello",
- "America/Indiana/Indianapolis",
- "America/Indiana/Vincennes",
- "America/Indiana/Winamac",
- "America/Indiana/Marengo",
- "America/Indiana/Petersburg",
- "America/Indiana/Vevay",
- "America/Chicago",
- "America/Indiana/Tell_City",
- "America/Indiana/Knox",
- "America/Menominee",
- "America/North_Dakota/Center",
- "America/North_Dakota/New_Salem",
- "America/North_Dakota/Beulah",
- "America/Denver",
- "America/Boise",
- "America/Phoenix",
- "America/Los_Angeles",
- "America/Anchorage",
- "America/Juneau",
- "America/Sitka",
- "America/Metlakatla",
- "America/Yakutat",
- "America/Nome",
- "America/Adak",
- "Pacific/Honolulu"
- ],
- "is_in_european_union": false,
- "fips": "US",
- "geonames_id": 6252001,
- "hasc_id": "US",
- "wikidata_id": "Q30"
+ "connection": {
+ "asn": 13335,
+ "organization": "Cloudflare, Inc.",
+ "isp": "APNIC Research and Development",
+ "range": "1.1.1.1/32"
},
- "city": {
- "fips": "644000",
- "alpha2": null,
- "geonames_id": 5368753,
- "hasc_id": null,
- "wikidata_id": "Q65",
- "name": "Los Angeles",
- "name_translated": "Los Angeles"
+ "location": {
+ "geonames_id": 5332870,
+ "latitude": 34.053611755371094,
+ "longitude": -118.24549865722656,
+ "zip": "90012",
+ "continent": {
+ "code": "NA",
+ "name": "North America",
+ "name_translated": "North America"
+ },
+ "country": {
+ "alpha2": "US",
+ "alpha3": "USA",
+ "calling_codes": [
+ "+1"
+ ],
+ "currencies": [
+ {
+ "symbol": "$",
+ "name": "US Dollar",
+ "symbol_native": "$",
+ "decimal_digits": 2,
+ "rounding": 0,
+ "code": "USD",
+ "name_plural": "US dollars"
+ }
+ ],
+ "emoji": "...",
+ "ioc": "USA",
+ "languages": [
+ {
+ "name": "English",
+ "name_native": "English"
+ }
+ ],
+ "name": "United States",
+ "name_translated": "United States",
+ "timezones": [
+ "America/New_York",
+ "America/Detroit",
+ "America/Kentucky/Louisville",
+ "America/Kentucky/Monticello",
+ "America/Indiana/Indianapolis",
+ "America/Indiana/Vincennes",
+ "America/Indiana/Winamac",
+ "America/Indiana/Marengo",
+ "America/Indiana/Petersburg",
+ "America/Indiana/Vevay",
+ "America/Chicago",
+ "America/Indiana/Tell_City",
+ "America/Indiana/Knox",
+ "America/Menominee",
+ "America/North_Dakota/Center",
+ "America/North_Dakota/New_Salem",
+ "America/North_Dakota/Beulah",
+ "America/Denver",
+ "America/Boise",
+ "America/Phoenix",
+ "America/Los_Angeles",
+ "America/Anchorage",
+ "America/Juneau",
+ "America/Sitka",
+ "America/Metlakatla",
+ "America/Yakutat",
+ "America/Nome",
+ "America/Adak",
+ "Pacific/Honolulu"
+ ],
+ "is_in_european_union": false,
+ "fips": "US",
+ "geonames_id": 6252001,
+ "hasc_id": "US",
+ "wikidata_id": "Q30"
+ },
+ "city": {
+ "fips": "644000",
+ "alpha2": null,
+ "geonames_id": 5368753,
+ "hasc_id": null,
+ "wikidata_id": "Q65",
+ "name": "Los Angeles",
+ "name_translated": "Los Angeles"
+ },
+ "region": {
+ "fips": "US06",
+ "alpha2": "US-CA",
+ "geonames_id": 5332921,
+ "hasc_id": "US.CA",
+ "wikidata_id": "Q99",
+ "name": "California",
+ "name_translated": "California"
+ }
},
- "region": {
- "fips": "US06",
- "alpha2": "US-CA",
- "geonames_id": 5332921,
- "hasc_id": "US.CA",
- "wikidata_id": "Q99",
- "name": "California",
- "name_translated": "California"
+ "tlds": [
+ ".us"
+ ],
+ "timezone": {
+ "id": "America/Los_Angeles",
+ "current_time": "2023-05-04T04:30:28-07:00",
+ "code": "PDT",
+ "is_daylight_saving": true,
+ "gmt_offset": -25200
+ },
+ "security": {
+ "is_anonymous": false,
+ "is_datacenter": false,
+ "is_vpn": false,
+ "is_bot": false,
+ "is_abuser": true,
+ "is_known_attacker": true,
+ "is_proxy": false,
+ "is_spam": false,
+ "is_tor": false,
+ "is_icloud_relay": false,
+ "threat_score": 100
+ },
+ "domains": {
+ "count": 10943,
+ "domains": [
+ "eliwise.academy",
+ "accountingprose.academy",
+ "pistola.academy",
+ "1and1-test-ntlds-fr.accountant",
+ "omnergy.africa"
+ ]
}
- },
- "tlds": [
- ".us"
- ],
- "timezone": {
- "id": "America/Los_Angeles",
- "current_time": "2023-05-04T04:30:28-07:00",
- "code": "PDT",
- "is_daylight_saving": true,
- "gmt_offset": -25200
- },
- "security": {
- "is_anonymous": false,
- "is_datacenter": false,
- "is_vpn": false,
- "is_bot": false,
- "is_abuser": true,
- "is_known_attacker": true,
- "is_proxy": false,
- "is_spam": false,
- "is_tor": false,
- "is_icloud_relay": false,
- "threat_score": 100
- },
- "domains": {
- "count": 10943,
- "domains": [
- "eliwise.academy",
- "accountingprose.academy",
- "pistola.academy",
- "1and1-test-ntlds-fr.accountant",
- "omnergy.africa"
- ]
}
- }
"""
from ansible.module_utils.basic import AnsibleModule
@@ -284,10 +285,10 @@ class IpbaseInfo(object):
def main():
module_args = dict(
- ip=dict(type='str', required=False, no_log=False),
- apikey=dict(type='str', required=False, no_log=True),
- hostname=dict(type='bool', required=False, no_log=False, default=False),
- language=dict(type='str', required=False, no_log=False, default='en'),
+ ip=dict(type='str', no_log=False),
+ apikey=dict(type='str', no_log=True),
+ hostname=dict(type='bool', no_log=False, default=False),
+ language=dict(type='str', no_log=False, default='en'),
)
module = AnsibleModule(
diff --git a/plugins/modules/ipify_facts.py b/plugins/modules/ipify_facts.py
index 7767c8d0ff..b7cd2b7447 100644
--- a/plugins/modules/ipify_facts.py
+++ b/plugins/modules/ipify_facts.py
@@ -24,7 +24,7 @@ options:
api_url:
description:
- URL of the ipify.org API service.
- - C(?format=json) will be appended per default.
+ - C(?format=json) is appended by default.
type: str
default: https://api.ipify.org/
timeout:
@@ -34,7 +34,7 @@ options:
default: 10
validate_certs:
description:
- - When set to V(false), SSL certificates will not be validated.
+ - When set to V(false), SSL certificates are not validated.
type: bool
default: true
notes:
diff --git a/plugins/modules/ipmi_boot.py b/plugins/modules/ipmi_boot.py
index bd96b35a51..69131732c6 100644
--- a/plugins/modules/ipmi_boot.py
+++ b/plugins/modules/ipmi_boot.py
@@ -95,17 +95,17 @@ author: "Bulat Gaifullin (@bgaifullin) "
RETURN = r"""
bootdev:
- description: The boot device name which will be used beyond next boot.
+ description: The boot device name which is used beyond next boot.
returned: success
type: str
sample: default
persistent:
- description: If True, system firmware will use this device beyond next boot.
+ description: If V(true), system firmware uses this device beyond next boot.
returned: success
type: bool
sample: false
uefimode:
- description: If True, system firmware will use UEFI boot explicitly beyond next boot.
+ description: If V(true), system firmware uses UEFI boot explicitly beyond next boot.
returned: success
type: bool
sample: false
diff --git a/plugins/modules/ipmi_power.py b/plugins/modules/ipmi_power.py
index e230fe4060..292ecc73aa 100644
--- a/plugins/modules/ipmi_power.py
+++ b/plugins/modules/ipmi_power.py
@@ -111,7 +111,17 @@ status:
targetAddress:
description: The remote target address.
type: int
- sample: [{"powerstate": "on", "targetAddress": 48}, {"powerstate": "on", "targetAddress": 50}]
+ sample:
+ [
+ {
+ "powerstate": "on",
+ "targetAddress": 48
+ },
+ {
+ "powerstate": "on",
+ "targetAddress": 50
+ }
+ ]
"""
EXAMPLES = r"""
diff --git a/plugins/modules/iptables_state.py b/plugins/modules/iptables_state.py
index 6f3fa19042..21fe75ce02 100644
--- a/plugins/modules/iptables_state.py
+++ b/plugins/modules/iptables_state.py
@@ -27,8 +27,8 @@ description:
notes:
- The rollback feature is not a module option and depends on task's attributes. To enable it, the module must be played
asynchronously, in other words by setting task attributes C(poll) to V(0), and C(async) to a value less or equal to C(ANSIBLE_TIMEOUT).
- If C(async) is greater, the rollback will still happen if it shall happen, but you will experience a connection timeout
- instead of more relevant info returned by the module after its failure.
+ If C(async) is greater, the rollback still happens when needed, but you experience a connection timeout instead of more
+ relevant info returned by the module after its failure.
attributes:
check_mode:
support: full
@@ -147,7 +147,8 @@ initial_state:
type: list
elements: str
returned: always
- sample: [
+ sample:
+ [
"# Generated by xtables-save v1.8.2",
"*filter",
":INPUT ACCEPT [0:0]",
@@ -161,7 +162,8 @@ restored:
type: list
elements: str
returned: always
- sample: [
+ sample:
+ [
"# Generated by xtables-save v1.8.2",
"*filter",
":INPUT DROP [0:0]",
@@ -180,7 +182,8 @@ saved:
type: list
elements: str
returned: always
- sample: [
+ sample:
+ [
"# Generated by xtables-save v1.8.2",
"*filter",
":INPUT ACCEPT [0:0]",
diff --git a/plugins/modules/ipwcli_dns.py b/plugins/modules/ipwcli_dns.py
index 118f59e8d9..604eb82b5f 100644
--- a/plugins/modules/ipwcli_dns.py
+++ b/plugins/modules/ipwcli_dns.py
@@ -16,7 +16,7 @@ short_description: Manage DNS Records for Ericsson IPWorks using C(ipwcli)
version_added: '0.2.0'
description:
- - Manage DNS records for the Ericsson IPWorks DNS server. The module will use the C(ipwcli) to deploy the DNS records.
+ - Manage DNS records for the Ericsson IPWorks DNS server. The module uses the C(ipwcli) to deploy the DNS records.
requirements:
- ipwcli (installed on Ericsson IPWorks)
@@ -271,18 +271,18 @@ def run_module():
dnsname=dict(type='str', required=True),
type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']),
container=dict(type='str', required=True),
- address=dict(type='str', required=False),
- ttl=dict(type='int', required=False, default=3600),
+ address=dict(type='str'),
+ ttl=dict(type='int', default=3600),
state=dict(type='str', default='present', choices=['absent', 'present']),
- priority=dict(type='int', required=False, default=10),
- weight=dict(type='int', required=False, default=10),
- port=dict(type='int', required=False),
- target=dict(type='str', required=False),
- order=dict(type='int', required=False),
- preference=dict(type='int', required=False),
- flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']),
- service=dict(type='str', required=False),
- replacement=dict(type='str', required=False),
+ priority=dict(type='int', default=10),
+ weight=dict(type='int', default=10),
+ port=dict(type='int'),
+ target=dict(type='str'),
+ order=dict(type='int'),
+ preference=dict(type='int'),
+ flags=dict(type='str', choices=['S', 'A', 'U', 'P']),
+ service=dict(type='str'),
+ replacement=dict(type='str'),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True)
)
diff --git a/plugins/modules/irc.py b/plugins/modules/irc.py
index 36fc31fc37..d18c9fd85f 100644
--- a/plugins/modules/irc.py
+++ b/plugins/modules/irc.py
@@ -51,19 +51,35 @@ options:
description:
- Text color for the message.
default: "none"
- choices: ["none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal",
- "light_cyan", "light_blue", "pink", "gray", "light_gray"]
+ choices:
+ - none
+ - white
+ - black
+ - blue
+ - green
+ - red
+ - brown
+ - purple
+ - orange
+ - yellow
+ - light_green
+ - teal
+ - light_cyan
+ - light_blue
+ - pink
+ - gray
+ - light_gray
aliases: [colour]
channel:
type: str
description:
- - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
+ - Channel name. One of nick_to or channel needs to be set. When both are set, the message is sent to both of them.
nick_to:
type: list
elements: str
description:
- A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the
- message will be sent to both of them.
+ message is sent to both of them.
key:
type: str
description:
@@ -102,7 +118,7 @@ options:
default: none
validate_certs:
description:
- - If set to V(false), the SSL certificates will not be validated.
+ - If set to V(false), the SSL certificates are not validated.
- This should always be set to V(true). Using V(false) is unsafe and should only be done if the network between between
Ansible and the IRC server is known to be safe.
- B(Note:) for security reasons, you should always set O(use_tls=true) and O(validate_certs=true) whenever possible.
@@ -216,9 +232,11 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if use_tls:
+ kwargs = {}
if validate_certs:
try:
context = ssl.create_default_context()
+ kwargs["server_hostname"] = server
except AttributeError:
raise Exception('Need at least Python 2.7.9 for SSL certificate validation')
else:
@@ -228,7 +246,7 @@ def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, k
else:
context = ssl.SSLContext()
context.verify_mode = ssl.CERT_NONE
- irc = context.wrap_socket(irc)
+ irc = context.wrap_socket(irc, **kwargs)
irc.connect((server, int(port)))
if passwd:
@@ -293,7 +311,7 @@ def main():
server=dict(default='localhost'),
port=dict(type='int', default=6667),
nick=dict(default='ansible'),
- nick_to=dict(required=False, type='list', elements='str'),
+ nick_to=dict(type='list', elements='str'),
msg=dict(required=True),
color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
"green", "red", "brown",
@@ -302,7 +320,7 @@ def main():
"light_blue", "pink", "gray",
"light_gray", "none"]),
style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
- channel=dict(required=False),
+ channel=dict(),
key=dict(no_log=True),
topic=dict(),
passwd=dict(no_log=True),
diff --git a/plugins/modules/iso_create.py b/plugins/modules/iso_create.py
index 008cb271bb..70f76558e6 100644
--- a/plugins/modules/iso_create.py
+++ b/plugins/modules/iso_create.py
@@ -32,8 +32,8 @@ attributes:
options:
src_files:
description:
- - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file.
- - Will fail if specified file or folder in O(src_files) does not exist on local machine.
+ - This is a list of absolute paths of source files or folders to be contained in the newly generated ISO file.
+ - The module fails if a specified file or folder in O(src_files) does not exist on the local machine.
- 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and underscores
(_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path names are limited
to 255 characters.'
@@ -43,7 +43,7 @@ options:
dest_iso:
description:
- The absolute path with file name of the new generated ISO file on local machine.
- - Will create intermediate folders when they does not exist.
+ - It creates intermediate folders when they do not exist.
type: path
required: true
interchange_level:
diff --git a/plugins/modules/iso_customize.py b/plugins/modules/iso_customize.py
index feac8417b8..5ee5b22c2c 100644
--- a/plugins/modules/iso_customize.py
+++ b/plugins/modules/iso_customize.py
@@ -14,7 +14,7 @@ module: iso_customize
short_description: Add/remove/change files in ISO file
description:
- This module is used to add/remove/change files in ISO file.
- - The file inside ISO will be overwritten if it exists by option O(add_files).
+ - The file inside the ISO is overwritten by option O(add_files) if it exists.
author:
- Yuhua Zou (@ZouYuhua)
requirements:
@@ -51,7 +51,7 @@ options:
add_files:
description:
- Allows to add and replace files in the ISO file.
- - Will create intermediate folders inside the ISO file when they do not exist.
+ - It creates intermediate folders inside the ISO file when they do not exist.
type: list
required: false
elements: dict
@@ -69,9 +69,9 @@ options:
required: true
notes:
- The C(pycdlib) library states it supports Python 2.7 and 3.4+.
- - The function C(add_file) in pycdlib will overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12 / Joliet
- / UDF. But it will not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10. So we take workaround "delete the
- existing file and then add file for ISO with Rock Ridge".
+ - The function C(add_file) in pycdlib is designed to overwrite the existing file in ISO with type ISO9660 / Rock Ridge 1.12
+ / Joliet / UDF. But it does not overwrite the existing file in ISO with Rock Ridge 1.09 / 1.10. So we take workaround
+ "delete the existing file and then add file for ISO with Rock Ridge".
"""
EXAMPLES = r"""
diff --git a/plugins/modules/iso_extract.py b/plugins/modules/iso_extract.py
index 8cda967b64..88644a6eb6 100644
--- a/plugins/modules/iso_extract.py
+++ b/plugins/modules/iso_extract.py
@@ -55,19 +55,19 @@ options:
required: true
force:
description:
- - If V(true), which will replace the remote file when contents are different than the source.
- - If V(false), the file will only be extracted and copied if the destination does not already exist.
+ - If V(true), it replaces the remote file when contents are different than the source.
+ - If V(false), the file is only extracted and copied if the destination does not already exist.
type: bool
default: true
executable:
description:
- The path to the C(7z) executable to use for extracting files from the ISO.
- - If not provided, it will assume the value V(7z).
+ - If not provided, it assumes the value V(7z).
type: path
password:
description:
- Password used to decrypt files from the ISO.
- - Will only be used if 7z is used.
+ - It is only used if C(7z) is used.
- The password is used as a command line argument to 7z. This is a B(potential security risk) that allows passwords
to be revealed if someone else can list running processes on the same machine in the right moment.
type: str
diff --git a/plugins/modules/jabber.py b/plugins/modules/jabber.py
index 01a34ff9f5..ab73672410 100644
--- a/plugins/modules/jabber.py
+++ b/plugins/modules/jabber.py
@@ -108,9 +108,9 @@ def main():
password=dict(required=True, no_log=True),
to=dict(required=True),
msg=dict(required=True),
- host=dict(required=False),
- port=dict(required=False, default=5222, type='int'),
- encoding=dict(required=False),
+ host=dict(),
+ port=dict(default=5222, type='int'),
+ encoding=dict(),
),
supports_check_mode=True
)
diff --git a/plugins/modules/java_cert.py b/plugins/modules/java_cert.py
index b75021fc57..13cfea9324 100644
--- a/plugins/modules/java_cert.py
+++ b/plugins/modules/java_cert.py
@@ -32,7 +32,7 @@ options:
cert_port:
description:
- Port to connect to URL.
- - This will be used to create server URL:PORT.
+ - This is used to create server URL:PORT.
type: int
default: 443
cert_path:
@@ -98,8 +98,8 @@ options:
state:
description:
- Defines action which can be either certificate import or removal.
- - When state is present, the certificate will always idempotently be inserted into the keystore, even if there already
- exists a cert alias that is different.
+ - When O(state=present), the certificate is always inserted into the keystore, even if there already exists a cert alias
+ that is different.
type: str
choices: [absent, present]
default: present
@@ -197,18 +197,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-msg:
- description: Output from stdout of keytool command after execution of given command.
- returned: success
- type: str
- sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'"
-
-rc:
- description: Keytool command execution return value.
- returned: success
- type: int
- sample: "0"
-
cmd:
description: Executed command to get action done.
returned: success
diff --git a/plugins/modules/java_keystore.py b/plugins/modules/java_keystore.py
index df7e71abbe..c826c9af4c 100644
--- a/plugins/modules/java_keystore.py
+++ b/plugins/modules/java_keystore.py
@@ -24,8 +24,8 @@ options:
name:
description:
- Name of the certificate in the keystore.
- - If the provided name does not exist in the keystore, the module will re-create the keystore. This behavior changed
- in community.general 3.0.0, before that the module would fail when the name did not match.
+ - If the provided name does not exist in the keystore, the module re-creates the keystore. This behavior changed in
+ community.general 3.0.0, before that the module would fail when the name did not match.
type: str
required: true
certificate:
@@ -62,7 +62,7 @@ options:
password:
description:
- Password that should be used to secure the keystore.
- - If the provided password fails to unlock the keystore, the module will re-create the keystore with the new passphrase.
+ - If the provided password fails to unlock the keystore, the module re-creates the keystore with the new passphrase.
This behavior changed in community.general 3.0.0, before that the module would fail when the password did not match.
type: str
required: true
@@ -130,7 +130,7 @@ notes:
or with the P(ansible.builtin.file#lookup) lookup), while O(certificate_path) and O(private_key_path) require that the
files are available on the target host.
- By design, any change of a value of options O(keystore_type), O(name) or O(password), as well as changes of key or certificate
- materials will cause the existing O(dest) to be overwritten.
+ materials causes the existing O(dest) to be overwritten.
"""
EXAMPLES = r"""
@@ -166,24 +166,12 @@ EXAMPLES = r"""
"""
RETURN = r"""
-msg:
- description: Output from stdout of keytool/openssl command after execution of given command or an error.
- returned: changed and failure
- type: str
- sample: "Unable to find the current certificate fingerprint in ..."
-
err:
description: Output from stderr of keytool/openssl command after error of given command.
returned: failure
type: str
sample: "Keystore password is too short - must be at least 6 characters\n"
-rc:
- description: Keytool/openssl command execution return value.
- returned: changed and failure
- type: int
- sample: "0"
-
cmd:
description: Executed command to get action done.
returned: changed and failure
diff --git a/plugins/modules/jenkins_build.py b/plugins/modules/jenkins_build.py
index cec8fcc490..4e11dd3642 100644
--- a/plugins/modules/jenkins_build.py
+++ b/plugins/modules/jenkins_build.py
@@ -99,6 +99,16 @@ EXAMPLES = r"""
state: stopped
url: http://localhost:8080
+- name: Trigger Jenkins build in detached mode
+ community.general.jenkins_build:
+ name: "detached-build"
+ state: present
+ user: admin
+ token: abcdefghijklmnopqrstuvwxyz123456
+ url: http://localhost:8080
+ detach: true
+ time_between_checks: 20
+
- name: Delete a jenkins build using token authentication
community.general.jenkins_build:
name: "delete-experiment"
@@ -180,11 +190,11 @@ class JenkinsBuild:
def get_jenkins_connection(self):
try:
- if (self.user and self.password):
+ if self.user and self.password:
return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
- elif (self.user and self.token):
+ elif self.user and self.token:
return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
- elif (self.user and not (self.password or self.token)):
+ elif self.user and not (self.password or self.token):
return jenkins.Jenkins(self.jenkins_url, self.user)
else:
return jenkins.Jenkins(self.jenkins_url)
diff --git a/plugins/modules/jenkins_build_info.py b/plugins/modules/jenkins_build_info.py
index f252eb504a..85cb22ad2c 100644
--- a/plugins/modules/jenkins_build_info.py
+++ b/plugins/modules/jenkins_build_info.py
@@ -30,7 +30,7 @@ options:
build_number:
description:
- An integer which specifies a build of a job.
- - If not specified the last build information will be returned.
+ - If not specified the last build information is returned.
type: int
password:
description:
@@ -138,11 +138,11 @@ class JenkinsBuildInfo:
def get_jenkins_connection(self):
try:
- if (self.user and self.password):
+ if self.user and self.password:
return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
- elif (self.user and self.token):
+ elif self.user and self.token:
return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
- elif (self.user and not (self.password or self.token)):
+ elif self.user and not (self.password or self.token):
return jenkins.Jenkins(self.jenkins_url, self.user)
else:
return jenkins.Jenkins(self.jenkins_url)
diff --git a/plugins/modules/jenkins_credential.py b/plugins/modules/jenkins_credential.py
new file mode 100644
index 0000000000..3bd8a9dd7a
--- /dev/null
+++ b/plugins/modules/jenkins_credential.py
@@ -0,0 +1,863 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: jenkins_credential
+short_description: Manage Jenkins credentials and domains through API
+version_added: 11.1.0
+description:
+ - This module allows managing Jenkins credentials and domain scopes through the Jenkins HTTP API.
+ - Create, update, and delete different credential types such as C(username/password), C(secret text), C(SSH key), C(certificates),
+ C(GitHub App), and domains.
+ - For scoped domains (O(type=scope)), it supports restrictions based on V(hostname), V(hostname:port), V(path), and V(scheme).
+requirements:
+ - urllib3 >= 1.26.0
+author:
+ - Youssef Ali (@YoussefKhalidAli)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ id:
+ description:
+ - The ID of the Jenkins credential or domain.
+ type: str
+ type:
+ description:
+ - Type of the credential or action.
+ choices:
+ - user_and_pass
+ - file
+ - text
+ - github_app
+ - ssh_key
+ - certificate
+ - scope
+ - token
+ type: str
+ state:
+ description:
+ - The state of the credential.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ scope:
+ description:
+ - Jenkins credential domain scope.
+ - Deleting a domain scope deletes all credentials within it.
+ type: str
+ default: '_'
+ force:
+ description:
+ - Force update if the credential already exists, used with O(state=present).
+ - If set to V(true), it deletes the existing credential before creating a new one.
+ - Always returns RV(ignore:changed=true).
+ type: bool
+ default: false
+ url:
+ description:
+ - Jenkins server URL.
+ type: str
+ default: http://localhost:8080
+ jenkins_user:
+ description:
+ - Jenkins user for authentication.
+ required: true
+ type: str
+ jenkins_password:
+ description:
+ - Jenkins password for token creation. Required if O(type=token).
+ type: str
+ token:
+ description:
+ - Jenkins API token. Required unless O(type=token).
+ type: str
+ description:
+ description:
+ - Description of the credential or domain.
+ default: ''
+ type: str
+ location:
+ description:
+ - Location of the credential. Either V(system) or V(folder).
+ - If O(location=folder), then O(url) must include the path to the folder, for example V(http://localhost:8080/job/FolderName).
+ choices:
+ - system
+ - folder
+ default: 'system'
+ type: str
+ name:
+ description:
+ - Name of the token to generate. Required if O(type=token).
+ - When generating a new token, do not pass O(id). It is generated automatically.
+ - Creating two tokens with the same name generates two distinct tokens with different RV(token_uuid) values.
+ - Replacing a token with another one of the same name requires deleting the original first using O(force=true).
+ type: str
+ username:
+ description:
+ - Username for credentials types that require it (for example O(type=ssh_key) or O(type=user_and_pass)).
+ type: str
+ password:
+ description:
+ - Password for credentials types that require it (for example O(type=user_and_pass) or O(type=certificate)).
+ type: str
+ secret:
+ description:
+ - Secret text (used when O(type=text)).
+ type: str
+ appID:
+ description:
+ - GitHub App ID.
+ type: str
+ api_uri:
+ description:
+ - Link to the GitHub API.
+ default: 'https://api.github.com'
+ type: str
+ owner:
+ description:
+ - GitHub App owner.
+ type: str
+ file_path:
+ description:
+ - File path to secret file (for example O(type=file) or O(type=certificate)).
+ - For O(type=certificate), this can be a V(.p12) or V(.pem) file.
+ type: path
+ private_key_path:
+ description:
+ - Path to private key file for PEM certificates or GitHub Apps.
+ type: path
+ passphrase:
+ description:
+ - SSH passphrase if needed.
+ type: str
+ inc_hostname:
+ description:
+ - List of hostnames to include in scope.
+ type: list
+ elements: str
+ exc_hostname:
+ description:
+ - List of hostnames to exclude from scope.
+ - If a hostname appears in both this list and O(inc_hostname), the hostname is excluded.
+ type: list
+ elements: str
+ inc_hostname_port:
+ description:
+ - List of V(host:port) to include in scope.
+ type: list
+ elements: str
+ exc_hostname_port:
+ description:
+ - List of V(host:port) to exclude from scope.
+ - If a hostname and port appears in both this list and O(inc_hostname_port), it is excluded.
+ type: list
+ elements: str
+ inc_path:
+ description:
+ - List of URL paths to include when matching credentials to domains.
+ - 'B(Matching is hierarchical): subpaths of excluded paths are also excluded, even if explicitly included.'
+ type: list
+ elements: str
+ exc_path:
+ description:
+ - List of URL paths to exclude.
+ - If a path appears in both this list and O(inc_path), it is excluded.
+ - If you exclude a subpath of a path previously included, that subpath alone is excluded.
+ type: list
+ elements: str
+ schemes:
+ description:
+ - List of schemes (for example V(http) or V(https)) to match.
+ type: list
+ elements: str
+"""
+
+EXAMPLES = r"""
+- name: Generate token
+ community.general.jenkins_credential:
+ id: "test-token"
+ jenkins_user: "admin"
+ jenkins_password: "password"
+ type: "token"
+ register: token_result
+
+- name: Add CUSTOM scope credential
+ community.general.jenkins_credential:
+ id: "CUSTOM"
+ type: "scope"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "Custom scope credential"
+ inc_path:
+ - "include/path"
+ - "include/path2"
+ exc_path:
+ - "exclude/path"
+ - "exclude/path2"
+ inc_hostname:
+ - "included-hostname"
+ - "included-hostname2"
+ exc_hostname:
+ - "excluded-hostname"
+ - "excluded-hostname2"
+ schemes:
+ - "http"
+ - "https"
+ inc_hostname_port:
+ - "included-hostname:7000"
+ - "included-hostname2:7000"
+ exc_hostname_port:
+ - "excluded-hostname:7000"
+ - "excluded-hostname2:7000"
+
+- name: Add user_and_pass credential
+ community.general.jenkins_credential:
+ id: "userpass-id"
+ type: "user_and_pass"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "User and password credential"
+ username: "user1"
+ password: "pass1"
+
+- name: Add file credential to custom scope
+ community.general.jenkins_credential:
+ id: "file-id"
+ type: "file"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ scope: "CUSTOM"
+ description: "File credential"
+ file_path: "../vars/my-secret.pem"
+
+- name: Add text credential to folder
+ community.general.jenkins_credential:
+ id: "text-id"
+ type: "text"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "Text credential"
+ secret: "mysecrettext"
+ location: "folder"
+ url: "http://localhost:8080/job/test"
+
+- name: Add githubApp credential
+ community.general.jenkins_credential:
+ id: "githubapp-id"
+ type: "github_app"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "GitHub app credential"
+ appID: "12345"
+ file_path: "../vars/github.pem"
+ owner: "github_owner"
+
+- name: Add sshKey credential
+ community.general.jenkins_credential:
+ id: "sshkey-id"
+ type: "ssh_key"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "SSH key credential"
+ username: "sshuser"
+ file_path: "../vars/ssh_key"
+ passphrase: "1234"
+
+- name: Add certificate credential (p12)
+ community.general.jenkins_credential:
+ id: "certificate-id"
+ type: "certificate"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "Certificate credential"
+ password: "12345678901234"
+ file_path: "../vars/certificate.p12"
+
+- name: Add certificate credential (pem)
+ community.general.jenkins_credential:
+ id: "certificate-id-pem"
+ type: "certificate"
+ jenkins_user: "admin"
+ token: "{{ token }}"
+ description: "Certificate credential (pem)"
+ file_path: "../vars/cert.pem"
+ private_key_path: "../vars/private.key"
+"""
+RETURN = r"""
+details:
+ description: Return more details in case of errors.
+ type: str
+ returned: failed
+token:
+ description:
+ - The generated API token if O(type=token).
+ - This is needed to authenticate API calls later.
+ - This should be stored securely, as it is the only time it is returned.
+ type: str
+ returned: success
+token_uuid:
+ description:
+ - The generated ID of the token.
+ - You pass this value back to the module as O(id) to edit or revoke the token later.
+ - This should be stored securely, as it is the only time it is returned.
+ type: str
+ returned: success
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, basic_auth_header
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible_collections.community.general.plugins.module_utils import deps
+
+import json
+import os
+import base64
+
+with deps.declare("urllib3", reason="urllib3 is required to embed files into requests"):
+ import urllib3
+
+
+# Function to validate file paths exist on disk
+def validate_file_exist(module, path):
+
+ if path and not os.path.exists(path):
+ module.fail_json(msg="File not found: {}".format(path))
+
+
+# Gets the Jenkins crumb for CSRF protection which is required for API calls
+def get_jenkins_crumb(module, headers):
+ type = module.params["type"]
+ url = module.params["url"]
+
+ if "/job" in url:
+ url = url.split("/job")[0]
+
+ crumb_url = "{}/crumbIssuer/api/json".format(url)
+
+ response, info = fetch_url(module, crumb_url, headers=headers)
+
+ if info["status"] != 200:
+ module.fail_json(msg="Failed to fetch Jenkins crumb. Confirm token is real.")
+
+ # Cookie is needed to generate API token
+ cookie = info.get("set-cookie", "")
+ session_cookie = cookie.split(";")[0] if cookie else None
+
+ try:
+ data = response.read()
+ json_data = json.loads(data)
+ crumb_request_field = json_data["crumbRequestField"]
+ crumb = json_data["crumb"]
+ headers[crumb_request_field] = crumb # Set the crumb in headers
+ headers["Content-Type"] = (
+ "application/x-www-form-urlencoded" # Set Content-Type for form data
+ )
+ if type == "token":
+ headers["Cookie"] = (
+ session_cookie # Set session cookie for token operations
+ )
+ return crumb_request_field, crumb, session_cookie # Return for test purposes
+
+ except Exception:
+ return None
+
+
+# Function to clean the data sent via API by removing unwanted keys and None values
+def clean_data(data):
+ # Keys to remove (including those with None values)
+ keys_to_remove = {
+ "url",
+ "token",
+ "jenkins_user",
+ "jenkins_password",
+ "file_path",
+ "private_key_path",
+ "type",
+ "state",
+ "force",
+ "name",
+ "scope",
+ "location",
+ "api_uri",
+ }
+
+ # Filter out None values and unwanted keys
+ cleaned_data = {
+ key: value
+ for key, value in data.items()
+ if value is not None and key not in keys_to_remove
+ }
+
+ return cleaned_data
+
+
+# Function to check if credentials/domain exists
+def target_exists(module, check_domain=False):
+ url = module.params["url"]
+ location = module.params["location"]
+ scope = module.params["scope"]
+ name = module.params["id"]
+ user = module.params["jenkins_user"]
+ token = module.params["token"]
+
+ headers = {"Authorization": basic_auth_header(user, token)}
+
+ if module.params["type"] == "scope" or check_domain:
+ target_url = "{}/credentials/store/{}/domain/{}/api/json".format(
+ url, location, scope if check_domain else name
+ )
+ elif module.params["type"] == "token":
+ return False # Can't check token
+ else:
+ target_url = "{}/credentials/store/{}/domain/{}/credential/{}/api/json".format(
+ url, location, scope, name
+ )
+
+ response, info = fetch_url(module, target_url, headers=headers)
+ status = info.get("status", 0)
+
+ if status == 200:
+ return True
+ elif status == 404:
+ return False
+ else:
+ module.fail_json(
+ msg="Unexpected status code {} when checking {} existence.".format(
+ status, name
+ )
+ )
+
+
+# Function to delete the scope or credential provided
+def delete_target(module, headers):
+ user = module.params["jenkins_user"]
+ type = module.params["type"]
+ url = module.params["url"]
+ location = module.params["location"]
+ id = module.params["id"]
+ scope = module.params["scope"]
+
+ body = False
+
+ try:
+
+ if type == "token":
+ delete_url = "{}/user/{}/descriptorByName/jenkins.security.ApiTokenProperty/revoke".format(
+ url, user
+ )
+ body = urlencode({"tokenUuid": id})
+
+ elif type == "scope":
+ delete_url = "{}/credentials/store/{}/domain/{}/doDelete".format(
+ url, location, id
+ )
+
+ else:
+ delete_url = (
+ "{}/credentials/store/{}/domain/{}/credential/{}/doDelete".format(
+ url, location, scope, id
+ )
+ )
+
+ response, info = fetch_url(
+ module,
+ delete_url,
+ headers=headers,
+ data=body if body else None,
+ method="POST",
+ )
+
+ status = info.get("status", 0)
+ if not status == 200:
+ module.fail_json(
+ msg="Failed to delete: HTTP {}, {}, {}".format(
+ status, response, headers
+ )
+ )
+
+ except Exception as e:
+ module.fail_json(msg="Exception during delete: {}".format(str(e)))
+
+
+# Function to read the private key for types github_app and ssh_key
+def read_privateKey(module):
+ try:
+ with open(module.params["private_key_path"], "r") as f:
+ private_key = f.read().strip()
+ return private_key
+ except Exception as e:
+ module.fail_json(msg="Failed to read private key file: {}".format(str(e)))
+
+
+# Function to build multipart form-data body and content-type header for file credential upload.
+# Returns:
+# body (bytes): Encoded multipart data
+# content_type (str): Content-Type header including boundary
+def embed_file_into_body(module, file_path, credentials):
+
+ filename = os.path.basename(file_path)
+
+ try:
+ with open(file_path, "rb") as f:
+ file_bytes = f.read()
+ except Exception as e:
+ module.fail_json(msg="Failed to read file: {}".format(str(e)))
+ return "", "" # Return for test purposes
+
+ credentials.update(
+ {
+ "file": "file0",
+ "fileName": filename,
+ }
+ )
+
+ payload = {"credentials": credentials}
+
+ fields = {"file0": (filename, file_bytes), "json": json.dumps(payload)}
+
+ body, content_type = urllib3.encode_multipart_formdata(fields)
+ return body, content_type
+
+
+# Main function to run the Ansible module
+def run_module():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type="str"),
+ type=dict(
+ type="str",
+ choices=[
+ "user_and_pass",
+ "file",
+ "text",
+ "github_app",
+ "ssh_key",
+ "certificate",
+ "scope",
+ "token",
+ ],
+ ),
+ state=dict(type="str", default="present", choices=["present", "absent"]),
+ force=dict(type="bool", default=False),
+ scope=dict(type="str", default="_"),
+ url=dict(type="str", default="http://localhost:8080"),
+ jenkins_user=dict(type="str", required=True),
+ jenkins_password=dict(type="str", no_log=True),
+ token=dict(type="str", no_log=True),
+ description=dict(type="str", default=""),
+ location=dict(type="str", default="system", choices=["system", "folder"]),
+ name=dict(type="str"),
+ username=dict(type="str"),
+ password=dict(type="str", no_log=True),
+ file_path=dict(type="path"),
+ secret=dict(type="str", no_log=True),
+ appID=dict(type="str"),
+ api_uri=dict(type="str", default="https://api.github.com"),
+ owner=dict(type="str"),
+ passphrase=dict(type="str", no_log=True),
+ private_key_path=dict(type="path", no_log=True),
+ # Scope specifications parameters
+ inc_hostname=dict(type="list", elements="str"),
+ exc_hostname=dict(type="list", elements="str"),
+ inc_hostname_port=dict(type="list", elements="str"),
+ exc_hostname_port=dict(type="list", elements="str"),
+ inc_path=dict(type="list", elements="str"),
+ exc_path=dict(type="list", elements="str"),
+ schemes=dict(type="list", elements="str"),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ("state", "present", ["type"]),
+ ("state", "absent", ["id"]),
+ ("type", "token", ["name", "jenkins_password"]),
+ ("type", "user_and_pass", ["username", "password", "id", "token"]),
+ ("type", "file", ["file_path", "id", "token"]),
+ ("type", "text", ["secret", "id", "token"]),
+ ("type", "github_app", ["appID", "private_key_path", "id", "token"]),
+ ("type", "ssh_key", ["username", "private_key_path", "id", "token"]),
+ ("type", "certificate", ["file_path", "id", "token"]),
+ ("type", "scope", ["id", "token"]),
+ ],
+ )
+
+ # Parameters
+ id = module.params["id"]
+ type = module.params["type"]
+ state = module.params["state"]
+ force = module.params["force"]
+ scope = module.params["scope"]
+ url = module.params["url"]
+ jenkins_user = module.params["jenkins_user"]
+ jenkins_password = module.params["jenkins_password"]
+ name = module.params["name"]
+ token = module.params["token"]
+ description = module.params["description"]
+ location = module.params["location"]
+ filePath = module.params["file_path"]
+ private_key_path = module.params["private_key_path"]
+ api_uri = module.params["api_uri"]
+ inc_hostname = module.params["inc_hostname"]
+ exc_hostname = module.params["exc_hostname"]
+ inc_hostname_port = module.params["inc_hostname_port"]
+ exc_hostname_port = module.params["exc_hostname_port"]
+ inc_path = module.params["inc_path"]
+ exc_path = module.params["exc_path"]
+ schemes = module.params["schemes"]
+
+ deps.validate(module)
+
+ headers = {
+ "Authorization": basic_auth_header(jenkins_user, token or jenkins_password),
+ }
+
+ # Get the crumb for CSRF protection
+ get_jenkins_crumb(module, headers)
+
+ result = dict(
+ changed=False,
+ msg="",
+ )
+
+ credentials = clean_data(module.params)
+
+ does_exist = target_exists(module)
+
+ # Check if the credential/domain doesn't exist and the user wants to delete
+ if not does_exist and state == "absent" and not type == "token":
+ result["changed"] = False
+ result["msg"] = "{} does not exist.".format(id)
+ module.exit_json(**result)
+
+ if state == "present":
+
+ # If updating, we need to delete the existing credential/domain first based on force parameter
+ if force and (does_exist or type == "token"):
+ delete_target(module, headers)
+ elif does_exist and not force:
+ result["changed"] = False
+ result["msg"] = "{} already exists. Use force=True to update.".format(id)
+ module.exit_json(**result)
+
+ if type == "token":
+
+ post_url = "{}/user/{}/descriptorByName/jenkins.security.ApiTokenProperty/generateNewToken".format(
+ url, jenkins_user
+ )
+
+ body = "newTokenName={}".format(name)
+
+ elif type == "scope":
+
+ post_url = "{}/credentials/store/{}/createDomain".format(url, location)
+
+ specifications = []
+
+ # Create a domain in Jenkins
+ if inc_hostname or exc_hostname:
+ specifications.append(
+ {
+ "stapler-class": "com.cloudbees.plugins.credentials.domains.HostnameSpecification",
+ "includes": ",".join(inc_hostname),
+ "excludes": ",".join(exc_hostname),
+ }
+ )
+
+ if inc_hostname_port or exc_hostname_port:
+ specifications.append(
+ {
+ "stapler-class": "com.cloudbees.plugins.credentials.domains.HostnamePortSpecification",
+ "includes": ",".join(inc_hostname_port),
+ "excludes": ",".join(exc_hostname_port),
+ }
+ )
+
+ if schemes:
+ specifications.append(
+ {
+ "stapler-class": "com.cloudbees.plugins.credentials.domains.SchemeSpecification",
+ "schemes": ",".join(schemes),
+ },
+ )
+
+ if inc_path or exc_path:
+ specifications.append(
+ {
+ "stapler-class": "com.cloudbees.plugins.credentials.domains.PathSpecification",
+ "includes": ",".join(inc_path),
+ "excludes": ",".join(exc_path),
+ }
+ )
+
+ payload = {
+ "name": id,
+ "description": description,
+ "specifications": specifications,
+ }
+
+ else:
+ if filePath:
+ validate_file_exist(module, filePath)
+ elif private_key_path:
+ validate_file_exist(module, private_key_path)
+
+ post_url = "{}/credentials/store/{}/domain/{}/createCredentials".format(
+ url, location, scope
+ )
+
+ cred_class = {
+ "user_and_pass": "com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl",
+ "file": "org.jenkinsci.plugins.plaincredentials.impl.FileCredentialsImpl",
+ "text": "org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl",
+ "github_app": "org.jenkinsci.plugins.github_branch_source.GitHubAppCredentials",
+ "ssh_key": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey",
+ "certificate": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl",
+ }
+ credentials.update({"$class": cred_class[type]})
+
+ if type == "file":
+
+ # Build multipart body and content-type
+ body, content_type = embed_file_into_body(module, filePath, credentials)
+ headers["Content-Type"] = content_type
+
+ elif type == "github_app":
+
+ private_key = read_privateKey(module)
+
+ credentials.update(
+ {
+ "privateKey": private_key,
+ "apiUri": api_uri,
+ }
+ )
+
+ elif type == "ssh_key":
+
+ private_key = read_privateKey(module)
+
+ credentials.update(
+ {
+ "privateKeySource": {
+ "stapler-class": "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey$DirectEntryPrivateKeySource",
+ "privateKey": private_key,
+ },
+ }
+ )
+
+ elif type == "certificate":
+
+ name, ext = os.path.splitext(filePath)
+
+ if ext.lower() in [".p12", ".pfx"]:
+ try:
+ with open(filePath, "rb") as f:
+ file_content = f.read()
+ uploaded_keystore = base64.b64encode(file_content).decode(
+ "utf-8"
+ )
+ except Exception as e:
+ module.fail_json(
+ msg="Failed to read or encode keystore file: {}".format(
+ str(e)
+ )
+ )
+
+ credentials.update(
+ {
+ "keyStoreSource": {
+ "$class": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl$UploadedKeyStoreSource",
+ "uploadedKeystore": uploaded_keystore,
+ },
+ }
+ )
+
+ elif ext.lower() in [".pem", ".crt"]: # PEM mode
+ try:
+ with open(filePath, "r") as f:
+ cert_chain = f.read()
+ with open(private_key_path, "r") as f:
+ private_key = f.read()
+ except Exception as e:
+ module.fail_json(
+ msg="Failed to read PEM files: {}".format(str(e))
+ )
+
+ credentials.update(
+ {
+ "keyStoreSource": {
+ "$class": "com.cloudbees.plugins.credentials.impl.CertificateCredentialsImpl$PEMEntryKeyStoreSource",
+ "certChain": cert_chain,
+ "privateKey": private_key,
+ },
+ }
+ )
+
+ else:
+ module.fail_json(
+ msg="Unsupported certificate file type. Only .p12, .pfx, .pem or .crt are supported."
+ )
+
+ payload = {"credentials": credentials}
+
+ if not type == "file" and not type == "token":
+ body = urlencode({"json": json.dumps(payload)})
+
+ else: # Delete
+
+ delete_target(module, headers)
+
+ module.exit_json(changed=True, msg="{} deleted successfully.".format(id))
+
+ if (
+ not type == "scope" and not scope == "_"
+ ): # Check if custom scope exists if adding to a custom scope
+ if not target_exists(module, True):
+ module.fail_json(msg="Domain {} doesn't exists".format(scope))
+
+ try:
+ response, info = fetch_url(
+ module, post_url, headers=headers, data=body, method="POST"
+ )
+ except Exception as e:
+ module.fail_json(msg="Request to {} failed: {}".format(post_url, str(e)))
+
+ status = info.get("status", 0)
+
+ if not status == 200:
+ body = response.read() if response else b""
+ module.fail_json(
+ msg="Failed to {} credential".format(
+ "add/update" if state == "present" else "delete"
+ ),
+ details=body.decode("utf-8", errors="ignore"),
+ )
+
+ if type == "token":
+ response_data = json.loads(response.read())
+ result["token"] = response_data["data"]["tokenValue"]
+ result["token_uuid"] = response_data["data"]["tokenUuid"]
+
+ result["changed"] = True
+ result["msg"] = response.read().decode("utf-8")
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ run_module()
diff --git a/plugins/modules/jenkins_job.py b/plugins/modules/jenkins_job.py
index 93d922ed22..8362a40255 100644
--- a/plugins/modules/jenkins_job.py
+++ b/plugins/modules/jenkins_job.py
@@ -76,8 +76,8 @@ options:
type: bool
default: true
description:
- - If set to V(false), the SSL certificates will not be validated. This should only set to V(false) used on personally
- controlled sites using self-signed certificates as it avoids verifying the source site.
+ - If set to V(false), the SSL certificates are not validated. This should only set to V(false) used on personally controlled
+ sites using self-signed certificates as it avoids verifying the source site.
- The C(python-jenkins) library only handles this by using the environment variable E(PYTHONHTTPSVERIFY).
version_added: 2.3.0
"""
@@ -350,14 +350,14 @@ def job_config_to_string(xml_str):
def main():
module = AnsibleModule(
argument_spec=dict(
- config=dict(type='str', required=False),
+ config=dict(type='str'),
name=dict(type='str', required=True),
- password=dict(type='str', required=False, no_log=True),
- state=dict(type='str', required=False, choices=['present', 'absent'], default="present"),
- enabled=dict(required=False, type='bool'),
- token=dict(type='str', required=False, no_log=True),
- url=dict(type='str', required=False, default="http://localhost:8080"),
- user=dict(type='str', required=False),
+ password=dict(type='str', no_log=True),
+ state=dict(type='str', choices=['present', 'absent'], default="present"),
+ enabled=dict(type='bool'),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
validate_certs=dict(type='bool', default=True),
),
mutually_exclusive=[
diff --git a/plugins/modules/jenkins_job_info.py b/plugins/modules/jenkins_job_info.py
index f406ec3b4b..37d9af3f56 100644
--- a/plugins/modules/jenkins_job_info.py
+++ b/plugins/modules/jenkins_job_info.py
@@ -53,7 +53,7 @@ options:
- User to authenticate with the Jenkins server.
validate_certs:
description:
- - If set to V(false), the SSL certificates will not be validated.
+ - If set to V(false), the SSL certificates are not validated.
- This should only set to V(false) used on personally controlled sites using self-signed certificates.
default: true
type: bool
@@ -135,7 +135,7 @@ jobs:
"fullname": "test-folder/test-job",
"url": "http://localhost:8080/job/test-job/",
"color": "blue"
- },
+ }
]
"""
diff --git a/plugins/modules/jenkins_node.py b/plugins/modules/jenkins_node.py
index affd462659..aa75100168 100644
--- a/plugins/modules/jenkins_node.py
+++ b/plugins/modules/jenkins_node.py
@@ -65,9 +65,9 @@ options:
offline_message:
description:
- Specifies the offline reason message to be set when configuring the Jenkins node state.
- - If O(offline_message) is given and requested O(state) is not V(disabled), an error will be raised.
+ - If O(offline_message) is given and requested O(state) is not V(disabled), an error is raised.
- Internally O(offline_message) is set using the V(toggleOffline) API, so updating the message when the node is already
- offline (current state V(disabled)) is not possible. In this case, a warning will be issued.
+ offline (current state V(disabled)) is not possible. In this case, a warning is issued.
type: str
version_added: 10.0.0
"""
diff --git a/plugins/modules/jenkins_plugin.py b/plugins/modules/jenkins_plugin.py
index 73ff40c725..f47dcfe92f 100644
--- a/plugins/modules/jenkins_plugin.py
+++ b/plugins/modules/jenkins_plugin.py
@@ -51,7 +51,7 @@ options:
type: str
description:
- Desired plugin state.
- - If set to V(latest), the check for new version will be performed every time. This is suitable to keep the plugin up-to-date.
+ - If set to V(latest), the check for new version is performed every time. This is suitable to keep the plugin up-to-date.
choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
default: present
timeout:
@@ -64,8 +64,8 @@ options:
description:
- Number of seconds after which a new copy of the C(update-center.json) file is downloaded. This is used to avoid the
need to download the plugin to calculate its checksum when O(state=latest) is specified.
- - Set it to V(0) if no cache file should be used. In that case, the plugin file will always be downloaded to calculate
- its checksum when O(state=latest) is specified.
+ - Set it to V(0) if no cache file should be used. In that case, the plugin file is always downloaded to calculate its
+ checksum when O(state=latest) is specified.
default: 86400
updates_url:
type: list
diff --git a/plugins/modules/jenkins_script.py b/plugins/modules/jenkins_script.py
index bd30f9daa7..5a00581366 100644
--- a/plugins/modules/jenkins_script.py
+++ b/plugins/modules/jenkins_script.py
@@ -39,8 +39,8 @@ options:
default: http://localhost:8080
validate_certs:
description:
- - If set to V(false), the SSL certificates will not be validated. This should only set to V(false) used on personally
- controlled sites using self-signed certificates as it avoids verifying the source site.
+ - If set to V(false), the SSL certificates are not validated. This should only set to V(false) used on personally controlled
+ sites using self-signed certificates as it avoids verifying the source site.
type: bool
default: true
user:
@@ -142,12 +142,12 @@ def main():
module = AnsibleModule(
argument_spec=dict(
script=dict(required=True, type="str"),
- url=dict(required=False, type="str", default="http://localhost:8080"),
- validate_certs=dict(required=False, type="bool", default=True),
- user=dict(required=False, type="str", default=None),
- password=dict(required=False, no_log=True, type="str", default=None),
- timeout=dict(required=False, type="int", default=10),
- args=dict(required=False, type="dict", default=None)
+ url=dict(type="str", default="http://localhost:8080"),
+ validate_certs=dict(type="bool", default=True),
+ user=dict(type="str"),
+ password=dict(no_log=True, type="str"),
+ timeout=dict(type="int", default=10),
+ args=dict(type="dict")
)
)
diff --git a/plugins/modules/jira.py b/plugins/modules/jira.py
index 93d988e38b..f99c252675 100644
--- a/plugins/modules/jira.py
+++ b/plugins/modules/jira.py
@@ -117,14 +117,13 @@ options:
suboptions:
type:
description:
- - Use type to specify which of the JIRA visibility restriction types will be used.
+ - Use O(comment_visibility.type) to specify which of the JIRA visibility restriction types is used.
type: str
required: true
choices: [group, role]
value:
description:
- - Use value to specify value corresponding to the type of visibility restriction. For example name of the group
- or role.
+ - Specify value corresponding to the type of visibility restriction. For example name of the group or role.
type: str
required: true
version_added: '3.2.0'
@@ -165,12 +164,12 @@ options:
type: str
required: false
description:
- - Set issue from which link will be created.
+ - Set issue from which link is created.
outwardissue:
type: str
required: false
description:
- - Set issue to which link will be created.
+ - Set issue to which link is created.
fields:
type: dict
required: false
@@ -192,7 +191,7 @@ options:
maxresults:
required: false
description:
- - Limit the result of O(operation=search). If no value is specified, the default jira limit will be used.
+ - Limit the result of O(operation=search). If no value is specified, the default JIRA limit is used.
- Used when O(operation=search) only, ignored otherwise.
type: int
version_added: '0.2.0'
@@ -226,12 +225,12 @@ options:
content:
type: str
description:
- - The Base64 encoded contents of the file to attach. If not specified, the contents of O(attachment.filename) will
- be used instead.
+ - The Base64 encoded contents of the file to attach. If not specified, the contents of O(attachment.filename) are
+ used instead.
mimetype:
type: str
description:
- - The MIME type to supply for the upload. If not specified, best-effort detection will be done.
+ - The MIME type to supply for the upload. If not specified, best-effort detection is performed.
notes:
- Currently this only works with basic-auth, or tokens.
- To use with JIRA Cloud, pass the login e-mail as the O(username) and the API token as O(password).
@@ -560,7 +559,6 @@ class JIRA(StateModuleHelper):
),
supports_check_mode=False
)
- use_old_vardict = False
state_param = 'operation'
def __init_module__(self):
diff --git a/plugins/modules/kdeconfig.py b/plugins/modules/kdeconfig.py
index 334db3aee4..ac542d04e8 100644
--- a/plugins/modules/kdeconfig.py
+++ b/plugins/modules/kdeconfig.py
@@ -17,12 +17,12 @@ description:
options:
path:
description:
- - Path to the config file. If the file does not exist it will be created.
+ - Path to the config file. If the file does not exist it is created.
type: path
required: true
kwriteconfig_path:
description:
- - Path to the kwriteconfig executable. If not specified, Ansible will try to discover it.
+ - Path to the kwriteconfig executable. If not specified, Ansible tries to discover it.
type: path
values:
description:
@@ -141,7 +141,7 @@ def run_kwriteconfig(module, cmd, path, groups, key, value):
else:
args.append('false')
else:
- args.append(value)
+ args.extend(['--', value])
module.run_command(args, check_rc=True)
diff --git a/plugins/modules/kernel_blacklist.py b/plugins/modules/kernel_blacklist.py
index 1dbf94f629..e1cf3fddb5 100644
--- a/plugins/modules/kernel_blacklist.py
+++ b/plugins/modules/kernel_blacklist.py
@@ -65,7 +65,6 @@ class Blacklist(StateModuleHelper):
),
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name)))
diff --git a/plugins/modules/keycloak_authentication.py b/plugins/modules/keycloak_authentication.py
index a0daf42b35..ae6d24958c 100644
--- a/plugins/modules/keycloak_authentication.py
+++ b/plugins/modules/keycloak_authentication.py
@@ -190,17 +190,20 @@ msg:
type: str
end_state:
- description: Representation of the authentication after module execution.
- returned: on success
- type: dict
- sample: {
+ description: Representation of the authentication after module execution.
+ returned: on success
+ type: dict
+ sample:
+ {
"alias": "Copy of first broker login",
"authenticationExecutions": [
{
"alias": "review profile config",
"authenticationConfig": {
"alias": "review profile config",
- "config": { "update.profile.on.first.login": "missing" },
+ "config": {
+ "update.profile.on.first.login": "missing"
+ },
"id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7"
},
"configurable": true,
@@ -210,7 +213,11 @@ end_state:
"level": 0,
"providerId": "idp-review-profile",
"requirement": "REQUIRED",
- "requirementChoices": [ "REQUIRED", "ALTERNATIVE", "DISABLED" ]
+ "requirementChoices": [
+ "REQUIRED",
+ "ALTERNATIVE",
+ "DISABLED"
+ ]
}
],
"builtIn": false,
@@ -360,8 +367,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_authentication_required_actions.py b/plugins/modules/keycloak_authentication_required_actions.py
index 147acf9a1e..69183ce605 100644
--- a/plugins/modules/keycloak_authentication_required_actions.py
+++ b/plugins/modules/keycloak_authentication_required_actions.py
@@ -49,7 +49,7 @@ options:
type: dict
defaultAction:
description:
- - Indicates, if any new user will have the required action assigned to it.
+ - Indicates whether new users have the required action assigned to them.
type: bool
enabled:
description:
@@ -149,7 +149,7 @@ end_state:
type: dict
defaultAction:
description:
- - Indicates, if any new user will have the required action assigned to it.
+ - Indicates whether new users have the required action assigned to them.
sample: false
type: bool
enabled:
@@ -237,8 +237,8 @@ def main():
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_authz_authorization_scope.py b/plugins/modules/keycloak_authz_authorization_scope.py
index 6b2e3c30f6..78d70c7ee6 100644
--- a/plugins/modules/keycloak_authz_authorization_scope.py
+++ b/plugins/modules/keycloak_authz_authorization_scope.py
@@ -37,8 +37,8 @@ options:
state:
description:
- State of the authorization scope.
- - On V(present), the authorization scope will be created (or updated if it exists already).
- - On V(absent), the authorization scope will be removed if it exists.
+ - On V(present), the authorization scope is created (or updated if it exists already).
+ - On V(absent), the authorization scope is removed if it exists.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -142,8 +142,8 @@ def main():
state=dict(type='str', default='present',
choices=['present', 'absent']),
name=dict(type='str', required=True),
- display_name=dict(type='str', required=False),
- icon_uri=dict(type='str', required=False),
+ display_name=dict(type='str'),
+ icon_uri=dict(type='str'),
client_id=dict(type='str', required=True),
realm=dict(type='str', required=True)
)
@@ -153,8 +153,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=(
- [['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_authz_custom_policy.py b/plugins/modules/keycloak_authz_custom_policy.py
index 5e1a2a6a2d..9607c0172c 100644
--- a/plugins/modules/keycloak_authz_custom_policy.py
+++ b/plugins/modules/keycloak_authz_custom_policy.py
@@ -38,8 +38,8 @@ options:
state:
description:
- State of the custom policy.
- - On V(present), the custom policy will be created (or updated if it exists already).
- - On V(absent), the custom policy will be removed if it exists.
+ - On V(present), the custom policy is created (or updated if it exists already).
+ - On V(absent), the custom policy is removed if it exists.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -139,8 +139,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=(
- [['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_authz_permission.py b/plugins/modules/keycloak_authz_permission.py
index 683b5f8c18..74bc6cf956 100644
--- a/plugins/modules/keycloak_authz_permission.py
+++ b/plugins/modules/keycloak_authz_permission.py
@@ -43,8 +43,8 @@ options:
state:
description:
- State of the authorization permission.
- - On V(present), the authorization permission will be created (or updated if it exists already).
- - On V(absent), the authorization permission will be removed if it exists.
+ - On V(present), the authorization permission is created (or updated if it exists already).
+ - On V(absent), the authorization permission is removed if it exists.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -237,13 +237,13 @@ def main():
state=dict(type='str', default='present',
choices=['present', 'absent']),
name=dict(type='str', required=True),
- description=dict(type='str', required=False),
+ description=dict(type='str'),
permission_type=dict(type='str', choices=['scope', 'resource'], required=True),
decision_strategy=dict(type='str', default='UNANIMOUS',
choices=['UNANIMOUS', 'AFFIRMATIVE', 'CONSENSUS']),
- resources=dict(type='list', elements='str', default=[], required=False),
- scopes=dict(type='list', elements='str', default=[], required=False),
- policies=dict(type='list', elements='str', default=[], required=False),
+ resources=dict(type='list', elements='str', default=[]),
+ scopes=dict(type='list', elements='str', default=[]),
+ policies=dict(type='list', elements='str', default=[]),
client_id=dict(type='str', required=True),
realm=dict(type='str', required=True)
)
@@ -253,8 +253,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=(
- [['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_authz_permission_info.py b/plugins/modules/keycloak_authz_permission_info.py
index 0271dfd4c4..af7318315f 100644
--- a/plugins/modules/keycloak_authz_permission_info.py
+++ b/plugins/modules/keycloak_authz_permission_info.py
@@ -134,8 +134,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=(
- [['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ [['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_client.py b/plugins/modules/keycloak_client.py
index e7a2de7c85..6c8a7b1383 100644
--- a/plugins/modules/keycloak_client.py
+++ b/plugins/modules/keycloak_client.py
@@ -37,8 +37,8 @@ options:
state:
description:
- State of the client.
- - On V(present), the client will be created (or updated if it exists already).
- - On V(absent), the client will be removed if it exists.
+ - On V(present), the client is created (or updated if it exists already).
+ - On V(absent), the client is removed if it exists.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -116,8 +116,8 @@ options:
secret:
description:
- When using O(client_authenticator_type=client-secret) (the default), you can specify a secret here (otherwise one
- will be generated if it does not exit). If changing this secret, the module will not register a change currently (but
- the changed secret will be saved).
+ is generated if it does not exist). If changing this secret, the module does not register a change currently (but the
+ changed secret is saved).
type: str
registration_access_token:
@@ -130,8 +130,8 @@ options:
default_roles:
description:
- - List of default roles for this client. If the client roles referenced do not exist yet, they will be created. This
- is C(defaultRoles) in the Keycloak REST API.
+ - List of default roles for this client. If the client roles referenced do not exist yet, they are created. This is
+ C(defaultRoles) in the Keycloak REST API.
aliases:
- defaultRoles
type: list
@@ -232,7 +232,7 @@ options:
protocol:
description:
- Type of client.
- - At creation only, default value will be V(openid-connect) if O(protocol) is omitted.
+ - At creation only, default value is V(openid-connect) if O(protocol) is omitted.
- The V(docker-v2) value was added in community.general 8.6.0.
type: str
choices: ['openid-connect', 'saml', 'docker-v2']
@@ -261,7 +261,7 @@ options:
client_template:
description:
- - Client template to use for this client. If it does not exist this field will silently be dropped. This is C(clientTemplate)
+ - Client template to use for this client. If it does not exist this field is silently dropped. This is C(clientTemplate)
in the Keycloak REST API.
type: str
aliases:
@@ -454,7 +454,7 @@ options:
- A dict of further attributes for this client. This can contain various configuration settings; an example is given
in the examples section. While an exhaustive list of permissible options is not available; possible options as of
Keycloak 3.4 are listed below. The Keycloak API does not validate whether a given option is appropriate for the protocol
- used; if specified anyway, Keycloak will simply not use it.
+ used; if specified anyway, Keycloak does not use it.
type: dict
suboptions:
saml.authnstatement:
@@ -532,7 +532,7 @@ options:
- For OpenID-Connect clients, client certificate for validating JWT issued by client and signed by its key, base64-encoded.
x509.subjectdn:
description:
- - For OpenID-Connect clients, subject which will be used to authenticate the client.
+ - For OpenID-Connect clients, subject which is used to authenticate the client.
type: str
version_added: 9.5.0
@@ -704,19 +704,31 @@ proposed:
description: Representation of proposed client.
returned: always
type: dict
- sample: {clientId: "test"}
+ sample: {"clientId": "test"}
existing:
description: Representation of existing client (sample is truncated).
returned: always
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
end_state:
description: Representation of client after module execution (sample is truncated).
returned: on success
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
@@ -941,8 +953,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['client_id', 'id'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_client_rolemapping.py b/plugins/modules/keycloak_client_rolemapping.py
index cb1cad8291..1700c99cc1 100644
--- a/plugins/modules/keycloak_client_rolemapping.py
+++ b/plugins/modules/keycloak_client_rolemapping.py
@@ -22,9 +22,9 @@ description:
the scope tailored to your needs and a user having the expected roles.
- The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that
- way by this module. You may pass single values for attributes when calling the module, and this will be translated into
- a list suitable for the API.
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
- When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API
to translate the name into the role ID.
attributes:
@@ -39,9 +39,9 @@ options:
state:
description:
- State of the client_rolemapping.
- - On V(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you
- provide.
- - On V(absent), the client_rolemapping will be removed if it exists.
+ - On V(present), the client_rolemapping is created if it does not yet exist, or updated with the parameters
+ you provide.
+ - On V(absent), the client_rolemapping is removed if it exists.
default: 'present'
type: str
choices:
@@ -87,8 +87,8 @@ options:
type: str
description:
- ID of the group to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of
- API calls required.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
client_id:
type: str
description:
@@ -98,8 +98,8 @@ options:
type: str
description:
- ID of the client to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of
- API calls required.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
roles:
description:
- Roles to be mapped to the group.
@@ -115,8 +115,8 @@ options:
type: str
description:
- The unique identifier for this role_representation.
- - This parameter is not required for updating or deleting a role_representation but providing it will reduce the
- number of API calls required.
+ - This parameter is not required for updating or deleting a role_representation but providing it reduces the number
+ of API calls required.
extends_documentation_fragment:
- community.general.keycloak
- community.general.keycloak.actiongroup_keycloak
@@ -209,7 +209,7 @@ proposed:
description: Representation of proposed client role mapping.
returned: always
type: dict
- sample: {clientId: "test"}
+ sample: {"clientId": "test"}
existing:
description:
@@ -217,7 +217,13 @@ existing:
- The sample is truncated.
returned: always
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
end_state:
description:
@@ -225,7 +231,13 @@ end_state:
- The sample is truncated.
returned: on success
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
@@ -268,8 +280,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_client_rolescope.py b/plugins/modules/keycloak_client_rolescope.py
index 7c87c0664c..fcf57c2e4a 100644
--- a/plugins/modules/keycloak_client_rolescope.py
+++ b/plugins/modules/keycloak_client_rolescope.py
@@ -22,9 +22,9 @@ description:
In a default Keycloak installation, admin-cli and an admin user would work, as would a separate client definition with
the scope tailored to your needs and a user having the expected roles.
- Client O(client_id) must have O(community.general.keycloak_client#module:full_scope_allowed) set to V(false).
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that
- way by this module. You may pass single values for attributes when calling the module, and this will be translated into
- a list suitable for the API.
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
attributes:
check_mode:
support: full
@@ -37,8 +37,8 @@ options:
state:
description:
- State of the role mapping.
- - On V(present), all roles in O(role_names) will be mapped if not exists yet.
- - On V(absent), all roles mapping in O(role_names) will be removed if it exists.
+ - On V(present), all roles in O(role_names) are mapped if they are not mapped yet.
+ - On V(absent), all role mappings in O(role_names) are removed if they exist.
default: 'present'
type: str
choices:
@@ -126,11 +126,12 @@ msg:
sample: "Client role scope for frontend-client-public has been updated"
end_state:
- description: Representation of role role scope after module execution.
- returned: on success
- type: list
- elements: dict
- sample: [
+ description: Representation of the client role scope after module execution.
+ returned: on success
+ type: list
+ elements: dict
+ sample:
+ [
{
"clientRole": false,
"composite": false,
diff --git a/plugins/modules/keycloak_clientscope.py b/plugins/modules/keycloak_clientscope.py
index 4c452d4f2e..ddb4e1b04b 100644
--- a/plugins/modules/keycloak_clientscope.py
+++ b/plugins/modules/keycloak_clientscope.py
@@ -22,9 +22,9 @@ description:
the scope tailored to your needs and a user having the expected roles.
- The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that
- way by this module. You may pass single values for attributes when calling the module, and this will be translated into
- a list suitable for the API.
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
- When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup to the API
to translate the name into the client_scope ID.
attributes:
@@ -39,8 +39,8 @@ options:
state:
description:
- State of the client_scope.
- - On V(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide.
- - On V(absent), the client_scope will be removed if it exists.
+ - On V(present), the client_scope is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the client_scope is removed if it exists.
default: 'present'
type: str
choices:
@@ -62,8 +62,8 @@ options:
type: str
description:
- The unique identifier for this client_scope.
- - This parameter is not required for updating or deleting a client_scope but providing it will reduce the number of
- API calls required.
+ - This parameter is not required for updating or deleting a client_scope but providing it reduces the number of API
+ calls required.
description:
type: str
description:
@@ -263,19 +263,31 @@ proposed:
description: Representation of proposed client scope.
returned: always
type: dict
- sample: {clientId: "test"}
+ sample: {"clientId": "test"}
existing:
description: Representation of existing client scope (sample is truncated).
returned: always
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
end_state:
description: Representation of client scope after module execution (sample is truncated).
returned: on success
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
@@ -354,8 +366,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['id', 'name'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_clientscope_type.py b/plugins/modules/keycloak_clientscope_type.py
index 0e742f676c..85308f1a22 100644
--- a/plugins/modules/keycloak_clientscope_type.py
+++ b/plugins/modules/keycloak_clientscope_type.py
@@ -99,20 +99,43 @@ proposed:
description: Representation of proposed client-scope types mapping.
returned: always
type: dict
- sample: {default_clientscopes: ["profile", "role"], optional_clientscopes: []}
+ sample:
+ {
+ "default_clientscopes": [
+ "profile",
+ "role"
+ ],
+ "optional_clientscopes": []
+ }
existing:
description:
- Representation of client scopes before module execution.
returned: always
type: dict
- sample: {default_clientscopes: ["profile", "role"], optional_clientscopes: ["phone"]}
+ sample:
+ {
+ "default_clientscopes": [
+ "profile",
+ "role"
+ ],
+ "optional_clientscopes": [
+ "phone"
+ ]
+ }
end_state:
description:
- Representation of client scopes after module execution.
- The sample is truncated.
returned: on success
type: dict
- sample: {default_clientscopes: ["profile", "role"], optional_clientscopes: []}
+ sample:
+ {
+ "default_clientscopes": [
+ "profile",
+ "role"
+ ],
+ "optional_clientscopes": []
+ }
"""
from ansible.module_utils.basic import AnsibleModule
@@ -145,10 +168,10 @@ def keycloak_clientscope_type_module():
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([
- ['token', 'auth_realm', 'auth_username', 'auth_password'],
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'],
['default_clientscopes', 'optional_clientscopes']
]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
mutually_exclusive=[
['token', 'auth_realm'],
diff --git a/plugins/modules/keycloak_clientsecret_info.py b/plugins/modules/keycloak_clientsecret_info.py
index da07d03248..0ea48f6a33 100644
--- a/plugins/modules/keycloak_clientsecret_info.py
+++ b/plugins/modules/keycloak_clientsecret_info.py
@@ -39,8 +39,8 @@ options:
id:
description:
- The unique identifier for this client.
- - This parameter is not required for getting or generating a client secret but providing it will reduce the number of
- API calls required.
+ - This parameter is not required for getting or generating a client secret but providing it reduces the number of API
+ calls required.
type: str
client_id:
diff --git a/plugins/modules/keycloak_clientsecret_regenerate.py b/plugins/modules/keycloak_clientsecret_regenerate.py
index bb449abc10..2bcaeb3705 100644
--- a/plugins/modules/keycloak_clientsecret_regenerate.py
+++ b/plugins/modules/keycloak_clientsecret_regenerate.py
@@ -43,8 +43,8 @@ options:
id:
description:
- The unique identifier for this client.
- - This parameter is not required for getting or generating a client secret but providing it will reduce the number of
- API calls required.
+ - This parameter is not required for getting or generating a client secret but providing it reduces the number of API
+ calls required.
type: str
client_id:
diff --git a/plugins/modules/keycloak_clienttemplate.py b/plugins/modules/keycloak_clienttemplate.py
index ae6e61380e..ee357605f1 100644
--- a/plugins/modules/keycloak_clienttemplate.py
+++ b/plugins/modules/keycloak_clienttemplate.py
@@ -35,8 +35,8 @@ options:
state:
description:
- State of the client template.
- - On V(present), the client template will be created (or updated if it exists already).
- - On V(absent), the client template will be removed if it exists.
+ - On V(present), the client template is created (or updated if it exists already).
+ - On V(absent), the client template is removed if it exists.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -238,21 +238,33 @@ proposed:
description: Representation of proposed client template.
returned: always
type: dict
- sample: {name: "test01"}
+ sample: {"name": "test01"}
existing:
description: Representation of existing client template (sample is truncated).
returned: always
type: dict
- sample: {"description": "test01", "fullScopeAllowed": false, "id": "9c3712ab-decd-481e-954f-76da7b006e5f", "name": "test01",
- "protocol": "saml"}
+ sample:
+ {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
end_state:
description: Representation of client template after module execution (sample is truncated).
returned: on success
type: dict
- sample: {"description": "test01", "fullScopeAllowed": false, "id": "9c3712ab-decd-481e-954f-76da7b006e5f", "name": "test01",
- "protocol": "saml"}
+ sample:
+ {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
@@ -296,8 +308,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['id', 'name'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_component.py b/plugins/modules/keycloak_component.py
index d5a3be2a8e..8b0c67b321 100644
--- a/plugins/modules/keycloak_component.py
+++ b/plugins/modules/keycloak_component.py
@@ -35,8 +35,8 @@ options:
state:
description:
- State of the Keycloak component.
- - On V(present), the component will be created (or updated if it exists already).
- - On V(absent), the component will be removed if it exists.
+ - On V(present), the component is created (or updated if it exists already).
+ - On V(absent), the component is removed if it exists.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -155,8 +155,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_group.py b/plugins/modules/keycloak_group.py
index 08d2555745..7053b33a35 100644
--- a/plugins/modules/keycloak_group.py
+++ b/plugins/modules/keycloak_group.py
@@ -20,9 +20,9 @@ description:
scope tailored to your needs and a user having the expected roles.
- The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html).
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that
- way by this module. You may pass single values for attributes when calling the module, and this will be translated into
- a list suitable for the API.
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
- When updating a group, where possible provide the group ID to the module. This removes a lookup to the API to translate
the name into the group ID.
attributes:
@@ -37,9 +37,9 @@ options:
state:
description:
- State of the group.
- - On V(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
- - On V(absent), the group will be removed if it exists. Be aware that absenting a group with subgroups will automatically
- delete all its subgroups too.
+ - On V(present), the group is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the group is removed if it exists. Be aware that absenting a group with subgroups automatically deletes
+ all its subgroups too.
default: 'present'
type: str
choices:
@@ -61,8 +61,7 @@ options:
type: str
description:
- The unique identifier for this group.
- - This parameter is not required for updating or deleting a group but providing it will reduce the number of API calls
- required.
+ - This parameter is not required for updating or deleting a group but providing it reduces the number of API calls required.
attributes:
type: dict
description:
@@ -282,8 +281,7 @@ end_state:
returned: always
sample: []
subGroups:
- description: A list of groups that are children of this group. These groups will have the same parameters as documented
- here.
+ description: A list of groups that are children of this group. These groups have the same parameters as documented here.
type: list
returned: always
clientRoles:
@@ -334,8 +332,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['id', 'name'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_identity_provider.py b/plugins/modules/keycloak_identity_provider.py
index 68a31a227b..40a06846d6 100644
--- a/plugins/modules/keycloak_identity_provider.py
+++ b/plugins/modules/keycloak_identity_provider.py
@@ -34,9 +34,8 @@ options:
state:
description:
- State of the identity provider.
- - On V(present), the identity provider will be created if it does not yet exist, or updated with the parameters you
- provide.
- - On V(absent), the identity provider will be removed if it exists.
+ - On V(present), the identity provider is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the identity provider is removed if it exists.
default: 'present'
type: str
choices:
@@ -148,14 +147,14 @@ options:
sync_mode:
description:
- - Default sync mode for all mappers. The sync mode determines when user data will be synced using the mappers.
+ - Default sync mode for all mappers. The sync mode determines when user data is synced using the mappers.
aliases:
- syncMode
type: str
issuer:
description:
- - The issuer identifier for the issuer of the response. If not provided, no validation will be performed.
+ - The issuer identifier for the issuer of the response. If not provided, no validation is performed.
type: str
authorizationUrl:
@@ -205,7 +204,7 @@ options:
useJwksUrl:
description:
- - If the switch is on, identity provider public keys will be downloaded from given JWKS URL.
+ - If V(true), identity provider public keys are downloaded from given JWKS URL.
type: bool
jwksUrl:
@@ -215,7 +214,7 @@ options:
entityId:
description:
- - The Entity ID that will be used to uniquely identify this SAML Service Provider.
+ - The Entity ID that is used to uniquely identify this SAML Service Provider.
type: str
singleSignOnServiceUrl:
@@ -354,76 +353,79 @@ msg:
sample: "Identity provider my-idp has been created"
proposed:
- description: Representation of proposed identity provider.
- returned: always
- type: dict
- sample: {
- "config": {
- "authorizationUrl": "https://idp.example.com/auth",
- "clientAuthMethod": "client_secret_post",
- "clientId": "my-client",
- "clientSecret": "secret",
- "issuer": "https://idp.example.com",
- "tokenUrl": "https://idp.example.com/token",
- "userInfoUrl": "https://idp.example.com/userinfo"
- },
- "displayName": "OpenID Connect IdP",
- "providerId": "oidc"
+ description: Representation of proposed identity provider.
+ returned: always
+ type: dict
+ sample:
+ {
+ "config": {
+ "authorizationUrl": "https://idp.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "secret",
+ "issuer": "https://idp.example.com",
+ "tokenUrl": "https://idp.example.com/token",
+ "userInfoUrl": "https://idp.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "providerId": "oidc"
}
existing:
- description: Representation of existing identity provider.
- returned: always
- type: dict
- sample: {
- "addReadTokenRoleOnCreate": false,
- "alias": "my-idp",
- "authenticateByDefault": false,
- "config": {
- "authorizationUrl": "https://old.example.com/auth",
- "clientAuthMethod": "client_secret_post",
- "clientId": "my-client",
- "clientSecret": "**********",
- "issuer": "https://old.example.com",
- "syncMode": "FORCE",
- "tokenUrl": "https://old.example.com/token",
- "userInfoUrl": "https://old.example.com/userinfo"
- },
- "displayName": "OpenID Connect IdP",
- "enabled": true,
- "firstBrokerLoginFlowAlias": "first broker login",
- "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
- "linkOnly": false,
- "providerId": "oidc",
- "storeToken": false,
- "trustEmail": false,
+ description: Representation of existing identity provider.
+ returned: always
+ type: dict
+ sample:
+ {
+ "addReadTokenRoleOnCreate": false,
+ "alias": "my-idp",
+ "authenticateByDefault": false,
+ "config": {
+ "authorizationUrl": "https://old.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "**********",
+ "issuer": "https://old.example.com",
+ "syncMode": "FORCE",
+ "tokenUrl": "https://old.example.com/token",
+ "userInfoUrl": "https://old.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "enabled": true,
+ "firstBrokerLoginFlowAlias": "first broker login",
+ "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
+ "linkOnly": false,
+ "providerId": "oidc",
+ "storeToken": false,
+ "trustEmail": false
}
end_state:
- description: Representation of identity provider after module execution.
- returned: on success
- type: dict
- sample: {
- "addReadTokenRoleOnCreate": false,
- "alias": "my-idp",
- "authenticateByDefault": false,
- "config": {
- "authorizationUrl": "https://idp.example.com/auth",
- "clientAuthMethod": "client_secret_post",
- "clientId": "my-client",
- "clientSecret": "**********",
- "issuer": "https://idp.example.com",
- "tokenUrl": "https://idp.example.com/token",
- "userInfoUrl": "https://idp.example.com/userinfo"
- },
- "displayName": "OpenID Connect IdP",
- "enabled": true,
- "firstBrokerLoginFlowAlias": "first broker login",
- "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
- "linkOnly": false,
- "providerId": "oidc",
- "storeToken": false,
- "trustEmail": false,
+ description: Representation of identity provider after module execution.
+ returned: on success
+ type: dict
+ sample:
+ {
+ "addReadTokenRoleOnCreate": false,
+ "alias": "my-idp",
+ "authenticateByDefault": false,
+ "config": {
+ "authorizationUrl": "https://idp.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "**********",
+ "issuer": "https://idp.example.com",
+ "tokenUrl": "https://idp.example.com/token",
+ "userInfoUrl": "https://idp.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "enabled": true,
+ "firstBrokerLoginFlowAlias": "first broker login",
+ "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
+ "linkOnly": false,
+ "providerId": "oidc",
+ "storeToken": false,
+ "trustEmail": false
}
"""
@@ -497,8 +499,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_realm.py b/plugins/modules/keycloak_realm.py
index 6d896d4141..c8bc7dc7df 100644
--- a/plugins/modules/keycloak_realm.py
+++ b/plugins/modules/keycloak_realm.py
@@ -39,8 +39,8 @@ options:
state:
description:
- State of the realm.
- - On V(present), the realm will be created (or updated if it exists already).
- - On V(absent), the realm will be removed if it exists.
+ - On V(present), the realm is created (or updated if it exists already).
+ - On V(absent), the realm is removed if it exists.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -553,19 +553,31 @@ proposed:
description: Representation of proposed realm.
returned: always
type: dict
- sample: {realm: "test"}
+ sample: {"realm": "test"}
existing:
description: Representation of existing realm (sample is truncated).
returned: always
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
end_state:
description: Representation of realm after module execution (sample is truncated).
returned: on success
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
@@ -705,8 +717,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['id', 'realm', 'enabled'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_realm_info.py b/plugins/modules/keycloak_realm_info.py
index 838b19513d..501ca48c21 100644
--- a/plugins/modules/keycloak_realm_info.py
+++ b/plugins/modules/keycloak_realm_info.py
@@ -19,9 +19,9 @@ description:
- This module allows you to get Keycloak realm public information using the Keycloak REST API.
- The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that
- way by this module. You may pass single values for attributes when calling the module, and this will be translated into
- a list suitable for the API.
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
diff --git a/plugins/modules/keycloak_realm_key.py b/plugins/modules/keycloak_realm_key.py
index 97e0af6da5..dbb284ec4b 100644
--- a/plugins/modules/keycloak_realm_key.py
+++ b/plugins/modules/keycloak_realm_key.py
@@ -25,11 +25,9 @@ description:
at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html). Aliases are provided so camelCased versions can be used
as well.
- This module is unable to detect changes to the actual cryptographic key after importing it. However, if some other property
- is changed alongside the cryptographic key, then the key will also get changed as a side-effect, as the JSON payload needs
- to include the private key. This can be considered either a bug or a feature, as the alternative would be to always update
- the realm key whether it has changed or not.
- - If certificate is not explicitly provided it will be dynamically created by Keycloak. Therefore comparing the current
- state of the certificate to the desired state (which may be empty) is not possible.
+ is changed alongside the cryptographic key, then the key also changes as a side-effect, as the JSON payload needs to include
+ the private key. This can be considered either a bug or a feature, as the alternative would be to always update the realm
+ key whether it has changed or not.
attributes:
check_mode:
support: full
@@ -42,8 +40,8 @@ options:
state:
description:
- State of the keycloak realm key.
- - On V(present), the realm key will be created (or updated if it exists already).
- - On V(absent), the realm key will be removed if it exists.
+ - On V(present), the realm key is created (or updated if it exists already).
+ - On V(absent), the realm key is removed if it exists.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -119,10 +117,10 @@ options:
notes:
- Current value of the private key cannot be fetched from Keycloak. Therefore comparing its desired state to the current
state is not possible.
- - If certificate is not explicitly provided it will be dynamically created by Keycloak. Therefore comparing the current
+ - If O(config.certificate) is not explicitly provided it is dynamically created by Keycloak. Therefore comparing the current
state of the certificate to the desired state (which may be empty) is not possible.
- Due to the private key and certificate options the module is B(not fully idempotent). You can use O(force=true) to force
- the module to always update if you know that the private key might have changed.
+ the module to ensure updating if you know that the private key might have changed.
extends_documentation_fragment:
- community.general.keycloak
- community.general.keycloak.actiongroup_keycloak
@@ -208,7 +206,21 @@ end_state:
description: Realm key configuration.
type: dict
returned: when O(state=present)
- sample: {"active": ["true"], "algorithm": ["RS256"], "enabled": ["true"], "priority": ["140"]}
+ sample:
+ {
+ "active": [
+ "true"
+ ],
+ "algorithm": [
+ "RS256"
+ ],
+ "enabled": [
+ "true"
+ ],
+ "priority": [
+ "140"
+ ]
+ }
"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
@@ -263,8 +275,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_realm_keys_metadata_info.py b/plugins/modules/keycloak_realm_keys_metadata_info.py
index 9946bd88ba..8340c8f2a5 100644
--- a/plugins/modules/keycloak_realm_keys_metadata_info.py
+++ b/plugins/modules/keycloak_realm_keys_metadata_info.py
@@ -104,8 +104,8 @@ def main():
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([["token", "auth_realm", "auth_username", "auth_password"]]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_realm_rolemapping.py b/plugins/modules/keycloak_realm_rolemapping.py
index 2937ed0ec0..2b6b6a4eda 100644
--- a/plugins/modules/keycloak_realm_rolemapping.py
+++ b/plugins/modules/keycloak_realm_rolemapping.py
@@ -22,9 +22,9 @@ description:
definition with the scope tailored to your needs and a user having the expected roles.
- The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
at U(https://www.keycloak.org/docs-api/18.0/rest-api/index.html).
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that
- way by this module. You may pass single values for attributes when calling the module, and this will be translated into
- a list suitable for the API.
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
- When updating a group_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API
to translate the name into the role ID.
attributes:
@@ -39,9 +39,8 @@ options:
state:
description:
- State of the realm_rolemapping.
- - On C(present), the realm_rolemapping will be created if it does not yet exist, or updated with the parameters you
- provide.
- - On C(absent), the realm_rolemapping will be removed if it exists.
+ - On C(present), the realm_rolemapping is created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the realm_rolemapping is removed if it exists.
default: 'present'
type: str
choices:
@@ -86,8 +85,8 @@ options:
type: str
description:
- ID of the group to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of
- API calls required.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
roles:
description:
- Roles to be mapped to the group.
@@ -103,8 +102,8 @@ options:
type: str
description:
- The unique identifier for this role_representation.
- - This parameter is not required for updating or deleting a role_representation but providing it will reduce the
- number of API calls required.
+ - This parameter is not required for updating or deleting a role_representation but providing it reduces the number
+ of API calls required.
extends_documentation_fragment:
- community.general.keycloak
- community.general.keycloak.actiongroup_keycloak
@@ -195,7 +194,7 @@ proposed:
description: Representation of proposed client role mapping.
returned: always
type: dict
- sample: {clientId: "test"}
+ sample: {"clientId": "test"}
existing:
description:
@@ -203,7 +202,13 @@ existing:
- The sample is truncated.
returned: always
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
end_state:
description:
@@ -211,7 +216,13 @@ end_state:
- The sample is truncated.
returned: on success
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
@@ -252,8 +263,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_role.py b/plugins/modules/keycloak_role.py
index 93705e2b4e..5b706354ed 100644
--- a/plugins/modules/keycloak_role.py
+++ b/plugins/modules/keycloak_role.py
@@ -22,9 +22,9 @@ description:
scope tailored to your needs and a user having the expected roles.
- The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that
- way by this module. You may pass single values for attributes when calling the module, and this will be translated into
- a list suitable for the API.
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
attributes:
check_mode:
support: full
@@ -37,8 +37,8 @@ options:
state:
description:
- State of the role.
- - On V(present), the role will be created if it does not yet exist, or updated with the parameters you provide.
- - On V(absent), the role will be removed if it exists.
+ - On V(present), the role is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the role is removed if it exists.
default: 'present'
type: str
choices:
@@ -201,15 +201,31 @@ existing:
description: Representation of existing role.
returned: always
type: dict
- sample: {"attributes": {}, "clientRole": true, "composite": false, "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
- "description": "My client test role", "id": "561703dd-0f38-45ff-9a5a-0c978f794547", "name": "myrole"}
+ sample:
+ {
+ "attributes": {},
+ "clientRole": true,
+ "composite": false,
+ "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
+ "description": "My client test role",
+ "id": "561703dd-0f38-45ff-9a5a-0c978f794547",
+ "name": "myrole"
+ }
end_state:
description: Representation of role after module execution (sample is truncated).
returned: on success
type: dict
- sample: {"attributes": {}, "clientRole": true, "composite": false, "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
- "description": "My updated client test role", "id": "561703dd-0f38-45ff-9a5a-0c978f794547", "name": "myrole"}
+ sample:
+ {
+ "attributes": {},
+ "clientRole": true,
+ "composite": false,
+ "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
+ "description": "My updated client test role",
+ "id": "561703dd-0f38-45ff-9a5a-0c978f794547",
+ "name": "myrole"
+ }
"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
@@ -228,7 +244,7 @@ def main():
composites_spec = dict(
name=dict(type='str', required=True),
- client_id=dict(type='str', aliases=['clientId'], required=False),
+ client_id=dict(type='str', aliases=['clientId']),
state=dict(type='str', default='present', choices=['present', 'absent'])
)
@@ -247,8 +263,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_user.py b/plugins/modules/keycloak_user.py
index 71e793ae21..2b3c838483 100644
--- a/plugins/modules/keycloak_user.py
+++ b/plugins/modules/keycloak_user.py
@@ -101,9 +101,9 @@ options:
groups:
description:
- List of groups for the user.
- Groups can be referenced by their name, like V(staff), or their path, like V(/staff/engineering).
- The path syntax allows you to reference subgroups, which is not possible otherwise.
- This is possible since community.general 10.6.0.
+ - Groups can be referenced by their name, like V(staff), or their path, like V(/staff/engineering). The path syntax
+ allows you to reference subgroups, which is not possible otherwise.
+ - Using the path is possible since community.general 10.6.0.
type: list
elements: dict
default: []
@@ -332,11 +332,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-msg:
- description: Message as to what action was taken.
- returned: always
- type: str
- sample: User f18c709c-03d6-11ee-970b-c74bf2721112 created
proposed:
description: Representation of the proposed user.
returned: on success
@@ -349,10 +344,6 @@ end_state:
description: Representation of the user after module execution.
returned: on success
type: dict
-changed:
- description: Return V(true) if the operation changed the user on the keycloak server, V(false) otherwise.
- returned: always
- type: bool
"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
@@ -410,8 +401,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_user_federation.py b/plugins/modules/keycloak_user_federation.py
index 78b8b0fdeb..3290ab8dd9 100644
--- a/plugins/modules/keycloak_user_federation.py
+++ b/plugins/modules/keycloak_user_federation.py
@@ -34,8 +34,8 @@ options:
state:
description:
- State of the user federation.
- - On V(present), the user federation will be created if it does not yet exist, or updated with the parameters you provide.
- - On V(absent), the user federation will be removed if it exists.
+ - On V(present), the user federation is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the user federation is removed if it exists.
default: 'present'
type: str
choices:
@@ -50,7 +50,7 @@ options:
id:
description:
- - The unique ID for this user federation. If left empty, the user federation will be searched by its O(name).
+ - The unique ID for this user federation. If left empty, the user federation is searched by its O(name).
type: str
name:
@@ -76,7 +76,7 @@ options:
parent_id:
description:
- - Unique ID for the parent of this user federation. Realm ID will be automatically used if left blank.
+ - Unique ID for the parent of this user federation. Realm ID is automatically used if left blank.
aliases:
- parentId
type: str
@@ -95,11 +95,11 @@ options:
value with the desired value always evaluates to not equal. This means the before and desired states are never equal
if the parameter is set.
- Set to V(always) to include O(config.bindCredential) in the comparison of before and desired state. Because of the
- redacted value returned by Keycloak the module will always detect a change and make an update if a O(config.bindCredential)
+ redacted value returned by Keycloak the module always detects a change and makes an update if a O(config.bindCredential)
value is set.
- Set to V(only_indirect) to exclude O(config.bindCredential) when comparing the before state with the desired state.
- The value of O(config.bindCredential) will only be updated if there are other changes to the user federation that
- require an update.
+ The value of O(config.bindCredential) is only updated if there are other changes to the user federation that require
+ an update.
type: str
default: always
choices:
@@ -129,14 +129,14 @@ options:
importEnabled:
description:
- - If V(true), LDAP users will be imported into Keycloak DB and synced by the configured sync policies.
+ - If V(true), LDAP users are imported into Keycloak DB and synced by the configured sync policies.
default: true
type: bool
editMode:
description:
- - V(READ_ONLY) is a read-only LDAP store. V(WRITABLE) means data will be synced back to LDAP on demand. V(UNSYNCED)
- means user data will be imported, but not synced back to LDAP.
+ - V(READ_ONLY) is a read-only LDAP store. V(WRITABLE) means data is synced back to LDAP on demand. V(UNSYNCED) means
+ user data is imported, but not synced back to LDAP.
type: str
choices:
- READ_ONLY
@@ -181,7 +181,7 @@ options:
userObjectClasses:
description:
- All values of LDAP objectClass attribute for users in LDAP divided by comma. For example V(inetOrgPerson, organizationalPerson).
- Newly created Keycloak users will be written to LDAP with all those object classes and existing LDAP user records
+ Newly created Keycloak users are written to LDAP with all those object classes and existing LDAP user records
are found just if they contain all those object classes.
type: str
@@ -222,7 +222,7 @@ options:
bindDn:
description:
- - DN of LDAP user which will be used by Keycloak to access LDAP server.
+ - DN of LDAP user which is used by Keycloak to access LDAP server.
type: str
bindCredential:
@@ -232,7 +232,7 @@ options:
startTls:
description:
- - Encrypts the connection to LDAP using STARTTLS, which will disable connection pooling.
+ - Encrypts the connection to LDAP using STARTTLS, which disables connection pooling.
default: false
type: bool
@@ -258,11 +258,11 @@ options:
useTruststoreSpi:
description:
- - Specifies whether LDAP connection will use the truststore SPI with the truststore configured in standalone.xml/domain.xml.
- V(always) means that it will always use it. V(never) means that it will not use it. V(ldapsOnly) means that it
- will use if your connection URL use ldaps.
+ - Specifies whether LDAP connection uses the truststore SPI with the truststore configured in standalone.xml/domain.xml.
+ V(always) means that it always uses it. V(never) means that it does not use it. V(ldapsOnly) means that it is
+ used if your connection URL uses ldaps.
- Note even if standalone.xml/domain.xml is not configured, the default Java cacerts or certificate specified by
- C(javax.net.ssl.trustStore) property will be used.
+ C(javax.net.ssl.trustStore) property is used.
default: ldapsOnly
type: str
choices:
@@ -335,8 +335,8 @@ options:
allowKerberosAuthentication:
description:
- - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data about authenticated users will
- be provisioned from this LDAP server.
+ - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data about authenticated users is
+ provisioned from this LDAP server.
default: false
type: bool
@@ -348,9 +348,9 @@ options:
krbPrincipalAttribute:
description:
- Name of the LDAP attribute, which refers to Kerberos principal. This is used to lookup appropriate LDAP user after
- successful Kerberos/SPNEGO authentication in Keycloak. When this is empty, the LDAP user will be looked based
- on LDAP username corresponding to the first part of his Kerberos principal. For instance, for principal C(john@KEYCLOAK.ORG),
- it will assume that LDAP username is V(john).
+ successful Kerberos/SPNEGO authentication in Keycloak. When this is empty, the LDAP user is looked up based on
+ LDAP username corresponding to the first part of his Kerberos principal. For instance, for principal C(john@KEYCLOAK.ORG),
+ it assumes that LDAP username is V(john).
type: str
version_added: 8.1.0
@@ -419,17 +419,17 @@ options:
evictionDay:
description:
- - Day of the week the entry will become invalid on.
+ - Day of the week the entry is set to become invalid on.
type: str
evictionHour:
description:
- - Hour of day the entry will become invalid on.
+ - Hour of day the entry is set to become invalid on.
type: str
evictionMinute:
description:
- - Minute of day the entry will become invalid on.
+ - Minute of day the entry is set to become invalid on.
type: str
maxLifespan:
@@ -461,12 +461,12 @@ options:
name:
description:
- - Name of the mapper. If no ID is given, the mapper will be searched by name.
+ - Name of the mapper. If no ID is given, the mapper is searched by name.
type: str
parentId:
description:
- - Unique ID for the parent of this mapper. ID of the user federation will automatically be used if left blank.
+ - Unique ID for the parent of this mapper. ID of the user federation is automatically used if left blank.
type: str
providerId:
@@ -598,122 +598,125 @@ msg:
sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799."
proposed:
- description: Representation of proposed user federation.
- returned: always
- type: dict
- sample: {
- "config": {
- "allowKerberosAuthentication": "false",
- "authType": "simple",
- "batchSizeForSync": "1000",
- "bindCredential": "**********",
- "bindDn": "cn=directory reader",
- "cachePolicy": "DEFAULT",
- "connectionPooling": "true",
- "connectionUrl": "ldaps://ldap.example.com:636",
- "debug": "false",
- "editMode": "READ_ONLY",
- "enabled": "true",
- "importEnabled": "true",
- "pagination": "true",
- "priority": "0",
- "rdnLDAPAttribute": "uid",
- "searchScope": "1",
- "syncRegistrations": "false",
- "trustEmail": "false",
- "useKerberosForPasswordAuthentication": "false",
- "useTruststoreSpi": "ldapsOnly",
- "userObjectClasses": "inetOrgPerson, organizationalPerson",
- "usernameLDAPAttribute": "uid",
- "usersDn": "ou=Users,dc=example,dc=com",
- "uuidLDAPAttribute": "entryUUID",
- "validatePasswordPolicy": "false",
- "vendor": "other"
- },
- "name": "ldap",
- "providerId": "ldap",
- "providerType": "org.keycloak.storage.UserStorageProvider"
+ description: Representation of proposed user federation.
+ returned: always
+ type: dict
+ sample:
+ {
+ "config": {
+ "allowKerberosAuthentication": "false",
+ "authType": "simple",
+ "batchSizeForSync": "1000",
+ "bindCredential": "**********",
+ "bindDn": "cn=directory reader",
+ "cachePolicy": "DEFAULT",
+ "connectionPooling": "true",
+ "connectionUrl": "ldaps://ldap.example.com:636",
+ "debug": "false",
+ "editMode": "READ_ONLY",
+ "enabled": "true",
+ "importEnabled": "true",
+ "pagination": "true",
+ "priority": "0",
+ "rdnLDAPAttribute": "uid",
+ "searchScope": "1",
+ "syncRegistrations": "false",
+ "trustEmail": "false",
+ "useKerberosForPasswordAuthentication": "false",
+ "useTruststoreSpi": "ldapsOnly",
+ "userObjectClasses": "inetOrgPerson, organizationalPerson",
+ "usernameLDAPAttribute": "uid",
+ "usersDn": "ou=Users,dc=example,dc=com",
+ "uuidLDAPAttribute": "entryUUID",
+ "validatePasswordPolicy": "false",
+ "vendor": "other"
+ },
+ "name": "ldap",
+ "providerId": "ldap",
+ "providerType": "org.keycloak.storage.UserStorageProvider"
}
existing:
- description: Representation of existing user federation.
- returned: always
- type: dict
- sample: {
- "config": {
- "allowKerberosAuthentication": "false",
- "authType": "simple",
- "batchSizeForSync": "1000",
- "bindCredential": "**********",
- "bindDn": "cn=directory reader",
- "cachePolicy": "DEFAULT",
- "changedSyncPeriod": "-1",
- "connectionPooling": "true",
- "connectionUrl": "ldaps://ldap.example.com:636",
- "debug": "false",
- "editMode": "READ_ONLY",
- "enabled": "true",
- "fullSyncPeriod": "-1",
- "importEnabled": "true",
- "pagination": "true",
- "priority": "0",
- "rdnLDAPAttribute": "uid",
- "searchScope": "1",
- "syncRegistrations": "false",
- "trustEmail": "false",
- "useKerberosForPasswordAuthentication": "false",
- "useTruststoreSpi": "ldapsOnly",
- "userObjectClasses": "inetOrgPerson, organizationalPerson",
- "usernameLDAPAttribute": "uid",
- "usersDn": "ou=Users,dc=example,dc=com",
- "uuidLDAPAttribute": "entryUUID",
- "validatePasswordPolicy": "false",
- "vendor": "other"
- },
- "id": "01122837-9047-4ae4-8ca0-6e2e891a765f",
- "mappers": [
- {
- "config": {
- "always.read.value.from.ldap": "false",
- "is.mandatory.in.ldap": "false",
- "ldap.attribute": "mail",
- "read.only": "true",
- "user.model.attribute": "email"
- },
- "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f",
- "name": "email",
- "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f",
- "providerId": "user-attribute-ldap-mapper",
- "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
- }
- ],
- "name": "myfed",
- "parentId": "myrealm",
- "providerId": "ldap",
- "providerType": "org.keycloak.storage.UserStorageProvider"
+ description: Representation of existing user federation.
+ returned: always
+ type: dict
+ sample:
+ {
+ "config": {
+ "allowKerberosAuthentication": "false",
+ "authType": "simple",
+ "batchSizeForSync": "1000",
+ "bindCredential": "**********",
+ "bindDn": "cn=directory reader",
+ "cachePolicy": "DEFAULT",
+ "changedSyncPeriod": "-1",
+ "connectionPooling": "true",
+ "connectionUrl": "ldaps://ldap.example.com:636",
+ "debug": "false",
+ "editMode": "READ_ONLY",
+ "enabled": "true",
+ "fullSyncPeriod": "-1",
+ "importEnabled": "true",
+ "pagination": "true",
+ "priority": "0",
+ "rdnLDAPAttribute": "uid",
+ "searchScope": "1",
+ "syncRegistrations": "false",
+ "trustEmail": "false",
+ "useKerberosForPasswordAuthentication": "false",
+ "useTruststoreSpi": "ldapsOnly",
+ "userObjectClasses": "inetOrgPerson, organizationalPerson",
+ "usernameLDAPAttribute": "uid",
+ "usersDn": "ou=Users,dc=example,dc=com",
+ "uuidLDAPAttribute": "entryUUID",
+ "validatePasswordPolicy": "false",
+ "vendor": "other"
+ },
+ "id": "01122837-9047-4ae4-8ca0-6e2e891a765f",
+ "mappers": [
+ {
+ "config": {
+ "always.read.value.from.ldap": "false",
+ "is.mandatory.in.ldap": "false",
+ "ldap.attribute": "mail",
+ "read.only": "true",
+ "user.model.attribute": "email"
+ },
+ "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f",
+ "name": "email",
+ "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f",
+ "providerId": "user-attribute-ldap-mapper",
+ "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+ }
+ ],
+ "name": "myfed",
+ "parentId": "myrealm",
+ "providerId": "ldap",
+ "providerType": "org.keycloak.storage.UserStorageProvider"
}
end_state:
- description: Representation of user federation after module execution.
- returned: on success
- type: dict
- sample: {
- "config": {
- "allowPasswordAuthentication": "false",
- "cachePolicy": "DEFAULT",
- "enabled": "true",
- "kerberosRealm": "EXAMPLE.COM",
- "keyTab": "/etc/krb5.keytab",
- "priority": "0",
- "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM",
- "updateProfileFirstLogin": "false"
- },
- "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122",
- "mappers": [],
- "name": "kerberos",
- "parentId": "myrealm",
- "providerId": "kerberos",
- "providerType": "org.keycloak.storage.UserStorageProvider"
+ description: Representation of user federation after module execution.
+ returned: on success
+ type: dict
+ sample:
+ {
+ "config": {
+ "allowPasswordAuthentication": "false",
+ "cachePolicy": "DEFAULT",
+ "enabled": "true",
+ "kerberosRealm": "EXAMPLE.COM",
+ "keyTab": "/etc/krb5.keytab",
+ "priority": "0",
+ "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM",
+ "updateProfileFirstLogin": "false"
+ },
+ "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122",
+ "mappers": [],
+ "name": "kerberos",
+ "parentId": "myrealm",
+ "providerId": "kerberos",
+ "providerType": "org.keycloak.storage.UserStorageProvider"
}
"""
@@ -838,8 +841,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=([['id', 'name'],
- ['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keycloak_user_rolemapping.py b/plugins/modules/keycloak_user_rolemapping.py
index c7af801706..49d71e2ca9 100644
--- a/plugins/modules/keycloak_user_rolemapping.py
+++ b/plugins/modules/keycloak_user_rolemapping.py
@@ -21,9 +21,9 @@ description:
the scope tailored to your needs and a user having the expected roles.
- The names of module options are snake_cased versions of the camelCase ones found in the Keycloak API and its documentation
at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
- - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will be returned that
- way by this module. You may pass single values for attributes when calling the module, and this will be translated into
- a list suitable for the API.
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and are returned that way
+ by this module. You may pass single values for attributes when calling the module, and this is translated into a list
+ suitable for the API.
- When updating a user_rolemapping, where possible provide the role ID to the module. This removes a lookup to the API to
translate the name into the role ID.
attributes:
@@ -38,8 +38,8 @@ options:
state:
description:
- State of the user_rolemapping.
- - On V(present), the user_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
- - On V(absent), the user_rolemapping will be removed if it exists.
+ - On V(present), the user_rolemapping is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the user_rolemapping is removed if it exists.
default: 'present'
type: str
choices:
@@ -61,14 +61,14 @@ options:
type: str
description:
- ID of the user to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of
- API calls required.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
service_account_user_client_id:
type: str
description:
- Client ID of the service-account-user to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of
- API calls required.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
client_id:
type: str
description:
@@ -79,8 +79,8 @@ options:
type: str
description:
- ID of the client to be mapped.
- - This parameter is not required for updating or deleting the rolemapping but providing it will reduce the number of
- API calls required.
+ - This parameter is not required for updating or deleting the rolemapping but providing it reduces the number of API
+ calls required.
roles:
description:
- Roles to be mapped to the user.
@@ -96,8 +96,8 @@ options:
type: str
description:
- The unique identifier for this role_representation.
- - This parameter is not required for updating or deleting a role_representation but providing it will reduce the
- number of API calls required.
+ - This parameter is not required for updating or deleting a role_representation but providing it reduces the number
+ of API calls required.
extends_documentation_fragment:
- community.general.keycloak
- community.general.keycloak.actiongroup_keycloak
@@ -190,7 +190,7 @@ proposed:
description: Representation of proposed client role mapping.
returned: always
type: dict
- sample: {clientId: "test"}
+ sample: {"clientId": "test"}
existing:
description:
@@ -198,7 +198,13 @@ existing:
- The sample is truncated.
returned: always
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
end_state:
description:
@@ -206,7 +212,13 @@ end_state:
- The sample is truncated.
returned: on success
type: dict
- sample: {"adminUrl": "http://www.example.com/admin_url", "attributes": {"request.object.signature.alg": "RS256"}}
+ sample:
+ {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256"
+ }
+ }
"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
@@ -242,9 +254,9 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password'],
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret'],
['uid', 'target_username', 'service_account_user_client_id']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
@@ -350,7 +362,7 @@ def main():
# Assign roles
result['changed'] = True
if module._diff:
- result['diff'] = dict(before=assigned_roles_before, after=update_roles)
+ result['diff'] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles})
if module.check_mode:
module.exit_json(**result)
kc.add_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm)
@@ -365,7 +377,7 @@ def main():
# Remove mapping of role
result['changed'] = True
if module._diff:
- result['diff'] = dict(before=assigned_roles_before, after=update_roles)
+ result['diff'] = dict(before={"roles": assigned_roles_before}, after={"roles": update_roles})
if module.check_mode:
module.exit_json(**result)
kc.delete_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm)
diff --git a/plugins/modules/keycloak_userprofile.py b/plugins/modules/keycloak_userprofile.py
index f637271497..9760a17ecf 100644
--- a/plugins/modules/keycloak_userprofile.py
+++ b/plugins/modules/keycloak_userprofile.py
@@ -33,9 +33,8 @@ options:
state:
description:
- State of the User Profile provider.
- - On V(present), the User Profile provider will be created if it does not yet exist, or updated with the parameters
- you provide.
- - On V(absent), the User Profile provider will be removed if it exists.
+ - On V(present), the User Profile provider is created if it does not yet exist, or updated with the parameters you provide.
+ - On V(absent), the User Profile provider is removed if it exists.
default: 'present'
type: str
choices:
@@ -171,7 +170,7 @@ options:
group:
description:
- - Specifies the User Profile group where this attribute will be added.
+ - Specifies the User Profile group where this attribute is added.
type: str
permissions:
@@ -406,7 +405,6 @@ data:
description: The data returned by the Keycloak API.
returned: when state is present
type: dict
- sample: {'...': '...'}
"""
from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
@@ -456,7 +454,6 @@ def main():
),
config=dict(
type='dict',
- required=False,
options={
'kc_user_profile_config': dict(
type='list',
@@ -466,7 +463,6 @@ def main():
'attributes': dict(
type='list',
elements='dict',
- required=False,
options={
'name': dict(type='str', required=True),
'display_name': dict(type='str', aliases=['displayName'], required=True),
@@ -476,17 +472,17 @@ def main():
'length': dict(
type='dict',
options={
- 'min': dict(type='int', required=False),
+ 'min': dict(type='int'),
'max': dict(type='int', required=True)
}
),
- 'email': dict(type='dict', required=False),
- 'username_prohibited_characters': dict(type='dict', aliases=['usernameProhibitedCharacters'], required=False),
- 'up_username_not_idn_homograph': dict(type='dict', aliases=['upUsernameNotIdnHomograph'], required=False),
- 'person_name_prohibited_characters': dict(type='dict', aliases=['personNameProhibitedCharacters'], required=False),
- 'uri': dict(type='dict', required=False),
- 'pattern': dict(type='dict', required=False),
- 'options': dict(type='dict', required=False)
+ 'email': dict(type='dict'),
+ 'username_prohibited_characters': dict(type='dict', aliases=['usernameProhibitedCharacters']),
+ 'up_username_not_idn_homograph': dict(type='dict', aliases=['upUsernameNotIdnHomograph']),
+ 'person_name_prohibited_characters': dict(type='dict', aliases=['personNameProhibitedCharacters']),
+ 'uri': dict(type='dict'),
+ 'pattern': dict(type='dict'),
+ 'options': dict(type='dict')
}
),
'annotations': dict(type='dict'),
@@ -513,15 +509,15 @@ def main():
options={
'name': dict(type='str', required=True),
'display_header': dict(type='str', aliases=['displayHeader'], required=True),
- 'display_description': dict(type='str', aliases=['displayDescription'], required=False),
- 'annotations': dict(type='dict', required=False)
+ 'display_description': dict(type='str', aliases=['displayDescription']),
+ 'annotations': dict(type='dict')
}
),
'unmanaged_attribute_policy': dict(
type='str',
aliases=['unmanagedAttributePolicy'],
choices=['ENABLED', 'ADMIN_EDIT', 'ADMIN_VIEW'],
- required=False
+
)
}
)
@@ -533,8 +529,8 @@ def main():
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
- required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
- required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]),
+ required_together=([['auth_username', 'auth_password']]),
required_by={'refresh_token': 'auth_realm'},
)
diff --git a/plugins/modules/keyring.py b/plugins/modules/keyring.py
index 3a8cbcae02..eef59dd10a 100644
--- a/plugins/modules/keyring.py
+++ b/plugins/modules/keyring.py
@@ -206,10 +206,10 @@ def run_module():
username=dict(type="str", required=True),
keyring_password=dict(type="str", required=True, no_log=True),
user_password=dict(
- type="str", required=False, no_log=True, aliases=["password"]
+ type="str", no_log=True, aliases=["password"]
),
state=dict(
- type="str", required=False, default="present", choices=["absent", "present"]
+ type="str", default="present", choices=["absent", "present"]
),
)
diff --git a/plugins/modules/kibana_plugin.py b/plugins/modules/kibana_plugin.py
index 09703b504c..b975e2dcea 100644
--- a/plugins/modules/kibana_plugin.py
+++ b/plugins/modules/kibana_plugin.py
@@ -59,11 +59,11 @@ options:
version:
description:
- Version of the plugin to be installed.
- - If plugin exists with previous version, plugin will B(not) be updated unless O(force) is set to V(true).
+ - If the plugin is installed in a previous version, it is B(not) updated unless O(force=true).
type: str
force:
description:
- - Delete and re-install the plugin. Can be useful for plugins update.
+ - Delete and re-install the plugin. It can be useful for plugins update.
type: bool
default: false
allow_root:
@@ -109,14 +109,6 @@ timeout:
description: The timeout for plugin download.
returned: success
type: str
-stdout:
- description: The command stdout.
- returned: success
- type: str
-stderr:
- description: The command stderr.
- returned: success
- type: str
state:
description: The state for the managed plugin.
returned: success
@@ -236,11 +228,11 @@ def main():
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
- url=dict(default=None),
+ url=dict(),
timeout=dict(default="1m"),
plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
- version=dict(default=None),
+ version=dict(),
force=dict(default=False, type="bool"),
allow_root=dict(default=False, type="bool"),
),
diff --git a/plugins/modules/krb_ticket.py b/plugins/modules/krb_ticket.py
index e021050c22..3a01944535 100644
--- a/plugins/modules/krb_ticket.py
+++ b/plugins/modules/krb_ticket.py
@@ -30,7 +30,7 @@ options:
principal:
description:
- The principal name.
- - If not set, the user running this module will be used.
+ - If not set, the user running this module is used.
type: str
state:
description:
@@ -50,17 +50,17 @@ options:
- Use O(cache_name) as the ticket cache name and location.
- If this option is not used, the default cache name and location are used.
- The default credentials cache may vary between systems.
- - If not set the the value of E(KRB5CCNAME) environment variable will be used instead, its value is used to name the
- default ticket cache.
+ - If not set, the value of the E(KRB5CCNAME) environment variable is used instead; its value is used to name the default
+ ticket cache.
type: str
lifetime:
description:
- Requests a ticket with the lifetime, if the O(lifetime) is not specified, the default ticket lifetime is used.
- - Specifying a ticket lifetime longer than the maximum ticket lifetime (configured by each site) will not override the
+ - Specifying a ticket lifetime longer than the maximum ticket lifetime (configured by each site) does not override the
configured maximum ticket lifetime.
- 'The value for O(lifetime) must be followed by one of the following suffixes: V(s) - seconds, V(m) - minutes, V(h)
- hours, V(d) - days.'
- - You cannot mix units; a value of V(3h30m) will result in an error.
+ - You cannot mix units; a value of V(3h30m) results in an error.
- See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference.
type: str
start_time:
@@ -78,7 +78,7 @@ options:
- Requests renewable tickets, with a total lifetime equal to O(renewable).
- 'The value for O(renewable) must be followed by one of the following delimiters: V(s) - seconds, V(m) - minutes, V(h)
- hours, V(d) - days.'
- - You cannot mix units; a value of V(3h30m) will result in an error.
+ - You cannot mix units; a value of V(3h30m) results in an error.
- See U(https://web.mit.edu/kerberos/krb5-1.12/doc/basic/date_format.html) for reference.
type: str
forwardable:
@@ -119,7 +119,7 @@ options:
keytab:
description:
- Requests a ticket, obtained from a key in the local host's keytab.
- - If O(keytab_path) is not specified will try to use default client keytab path (C(-i) option).
+ - If O(keytab_path) is not specified, it tries to use the default client keytab path (C(-i) option).
type: bool
keytab_path:
description:
diff --git a/plugins/modules/launchd.py b/plugins/modules/launchd.py
index 03dc3a5928..310e1af9b1 100644
--- a/plugins/modules/launchd.py
+++ b/plugins/modules/launchd.py
@@ -37,9 +37,9 @@ options:
version_added: 10.1.0
state:
description:
- - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary.
- - Launchd does not support V(restarted) nor V(reloaded) natively. These will trigger a stop/start (restarted) or an
- unload/load (reloaded).
+ - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+ - C(launchd) does not support V(restarted) nor V(reloaded) natively. These states trigger a stop/start (restarted) or
+ an unload/load (reloaded).
- V(restarted) unloads and loads the service before start to ensure that the latest job definition (plist) is used.
- V(reloaded) unloads and loads the service to ensure that the latest job definition (plist) is used. Whether a service
is started or stopped depends on the content of the definition file.
@@ -53,8 +53,8 @@ options:
force_stop:
description:
- Whether the service should not be restarted automatically by launchd.
- - Services might have the 'KeepAlive' attribute set to true in a launchd configuration. In case this is set to true,
- stopping a service will cause that launchd starts the service again.
+ - Services might have the C(KeepAlive) attribute set to V(true) in a launchd configuration. In case this is set to V(true),
+ stopping a service causes C(launchd) to start the service again.
- Set this option to V(true) to let this module change the C(KeepAlive) attribute to V(false).
type: bool
default: false
@@ -111,16 +111,16 @@ EXAMPLES = r"""
RETURN = r"""
status:
- description: Metadata about service status.
- returned: always
- type: dict
- sample:
- {
- "current_pid": "-",
- "current_state": "stopped",
- "previous_pid": "82636",
- "previous_state": "running"
- }
+ description: Metadata about service status.
+ returned: always
+ type: dict
+ sample:
+ {
+ "current_pid": "-",
+ "current_state": "stopped",
+ "previous_pid": "82636",
+ "previous_state": "running"
+ }
"""
import os
diff --git a/plugins/modules/layman.py b/plugins/modules/layman.py
index b0fab39233..b19428d9f9 100644
--- a/plugins/modules/layman.py
+++ b/plugins/modules/layman.py
@@ -35,7 +35,7 @@ options:
type: str
list_url:
description:
- - An URL of the alternative overlays list that defines the overlay to install. This list will be fetched and saved under
+ - A URL of the alternative overlays list that defines the overlay to install. This list is fetched and saved under
C(${overlay_defs}/${name}.xml), where C(overlay_defs) is read from the Layman's configuration.
aliases: [url]
type: str
@@ -47,7 +47,7 @@ options:
type: str
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be set to V(false) when no other option exists.
+ - If V(false), SSL certificates are not validated. This should only be set to V(false) when no other option exists.
type: bool
default: true
"""
@@ -236,7 +236,7 @@ def main():
name=dict(required=True),
list_url=dict(aliases=['url']),
state=dict(default="present", choices=['present', 'absent', 'updated']),
- validate_certs=dict(required=False, default=True, type='bool'),
+ validate_certs=dict(default=True, type='bool'),
),
supports_check_mode=True
)
diff --git a/plugins/modules/ldap_attrs.py b/plugins/modules/ldap_attrs.py
index c7ccd42154..592da93a63 100644
--- a/plugins/modules/ldap_attrs.py
+++ b/plugins/modules/ldap_attrs.py
@@ -43,10 +43,9 @@ options:
choices: [present, absent, exact]
default: present
description:
- - The state of the attribute values. If V(present), all given attribute values will be added if they are missing. If
- V(absent), all given attribute values will be removed if present. If V(exact), the set of attribute values will be
- forced to exactly those provided and no others. If O(state=exact) and the attribute value is empty, all values for
- this attribute will be removed.
+ - The state of the attribute values. If V(present), all given attribute values are added if they are missing. If V(absent),
+ all given attribute values are removed if present. If V(exact), the set of attribute values is forced to exactly those
+ provided and no others. If O(state=exact) and the attribute value is empty, all values for this attribute are removed.
attributes:
required: true
type: dict
@@ -297,7 +296,7 @@ def main():
module = AnsibleModule(
argument_spec=gen_specs(
attributes=dict(type='dict', required=True),
- ordered=dict(type='bool', default=False, required=False),
+ ordered=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
),
supports_check_mode=True,
diff --git a/plugins/modules/ldap_inc.py b/plugins/modules/ldap_inc.py
index ea6788de66..224027f666 100644
--- a/plugins/modules/ldap_inc.py
+++ b/plugins/modules/ldap_inc.py
@@ -26,8 +26,8 @@ notes:
and the control PostRead. This extension and the control are implemented in OpenLdap but not all directory servers implement
them. In this case, the module automatically uses a more classic method based on two phases, first the current value is
read then the modify operation remove the old value and add the new one in a single request. If the value has changed
- by a concurrent call then the remove action will fail. Then the sequence is retried 3 times before raising an error to
- the playbook. In an heavy modification environment, the module does not guarante to be systematically successful.
+ by a concurrent call then the remove action fails. Then the sequence is retried 3 times before raising an error to the
+ playbook. In a heavy modification environment, the module is not guaranteed to be systematically successful.
- This only deals with integer attribute of an existing entry. To modify attributes of an entry, see M(community.general.ldap_attrs)
or to add or remove whole entries, see M(community.general.ldap_entry).
author:
@@ -153,7 +153,7 @@ def main():
module = AnsibleModule(
argument_spec=gen_specs(
attribute=dict(type='str', required=True),
- increment=dict(type='int', default=1, required=False),
+ increment=dict(type='int', default=1),
method=dict(type='str', default='auto', choices=['auto', 'rfc4525', 'legacy']),
),
supports_check_mode=True,
diff --git a/plugins/modules/ldap_search.py b/plugins/modules/ldap_search.py
index 155e9859d5..47c4d8d64d 100644
--- a/plugins/modules/ldap_search.py
+++ b/plugins/modules/ldap_search.py
@@ -64,10 +64,10 @@ options:
version_added: 7.1.0
base64_attributes:
description:
- - If provided, all attribute values returned that are listed in this option will be Base64 encoded.
- - If the special value V(*) appears in this list, all attributes will be Base64 encoded.
- - All other attribute values will be converted to UTF-8 strings. If they contain binary data, please note that invalid
- UTF-8 bytes will be omitted.
+ - If provided, all attribute values returned that are listed in this option are Base64 encoded.
+ - If the special value V(*) appears in this list, all attributes are Base64 encoded.
+ - All other attribute values are converted to UTF-8 strings. If they contain binary data, please note that invalid UTF-8
+ bytes are omitted.
type: list
elements: str
version_added: 7.0.0
@@ -91,15 +91,16 @@ EXAMPLES = r"""
register: ldap_group_gids
"""
+# @FIXME RV 'results' is meant to be used when 'loop:' was used with the module.
RESULTS = r"""
results:
description:
- - For every entry found, one dictionary will be returned.
+ - For every entry found, one dictionary is returned.
- Every dictionary contains a key C(dn) with the entry's DN as a value.
- Every attribute of the entry found is added to the dictionary. If the key has precisely one value, that value is taken
directly, otherwise the key's value is a list.
- - Note that all values (for single-element lists) and list elements (for multi-valued lists) will be UTF-8 strings. Some
- might contain Base64-encoded binary data; which ones is determined by the O(base64_attributes) option.
+ - Note that all values (for single-element lists) and list elements (for multi-valued lists) are UTF-8 strings. Some might
+ contain Base64-encoded binary data; which ones is determined by the O(base64_attributes) option.
type: list
elements: dict
"""
diff --git a/plugins/modules/librato_annotation.py b/plugins/modules/librato_annotation.py
index 35fc810c65..1087cb426c 100644
--- a/plugins/modules/librato_annotation.py
+++ b/plugins/modules/librato_annotation.py
@@ -13,8 +13,8 @@ DOCUMENTATION = r"""
module: librato_annotation
short_description: Create an annotation in Librato
description:
- - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created
- automatically.
+ - Create an annotation event on the given annotation stream O(name). If the annotation stream does not exist, it creates
+ one automatically.
author: "Seth Edwards (@Sedward)"
requirements: []
extends_documentation_fragment:
@@ -39,7 +39,7 @@ options:
type: str
description:
- The annotation stream name.
- - If the annotation stream does not exist, it will be created automatically.
+ - If the annotation stream does not exist, it creates one automatically.
required: false
title:
type: str
@@ -160,12 +160,12 @@ def main():
argument_spec=dict(
user=dict(required=True),
api_key=dict(required=True, no_log=True),
- name=dict(required=False),
+ name=dict(),
title=dict(required=True),
- source=dict(required=False),
- description=dict(required=False),
- start_time=dict(required=False, default=None, type='int'),
- end_time=dict(required=False, default=None, type='int'),
+ source=dict(),
+ description=dict(),
+ start_time=dict(type='int'),
+ end_time=dict(type='int'),
links=dict(type='list', elements='dict')
)
)
diff --git a/plugins/modules/linode.py b/plugins/modules/linode.py
index fcfcce4d0a..d2c5714d47 100644
--- a/plugins/modules/linode.py
+++ b/plugins/modules/linode.py
@@ -47,8 +47,8 @@ options:
linode_id:
description:
- Unique ID of a Linode server. This value is read-only in the sense that if you specify it on creation of a Linode
- it will not be used. The Linode API generates these IDs and we can those generated value here to reference a Linode
- more specifically. This is useful for idempotence.
+ it is not used. The Linode API generates these IDs and we can use those generated values here to reference a Linode more
+ specifically. This is useful for idempotency.
aliases: [lid]
type: int
additional_disks:
@@ -103,7 +103,7 @@ options:
type: int
backupwindow:
description:
- - The time window in which backups will be taken.
+ - The time window in which backups are taken.
type: int
plan:
description:
diff --git a/plugins/modules/linode_v4.py b/plugins/modules/linode_v4.py
index b650f7f104..0095cb9002 100644
--- a/plugins/modules/linode_v4.py
+++ b/plugins/modules/linode_v4.py
@@ -17,7 +17,7 @@ requirements:
author:
- Luke Murphy (@decentral1se)
notes:
- - No Linode resizing is currently implemented. This module will, in time, replace the current Linode module which uses deprecated
+ - No Linode resizing is currently implemented. This module aims to replace the current Linode module which uses deprecated
API bindings on the Linode side.
extends_documentation_fragment:
- community.general.attributes
@@ -43,7 +43,7 @@ options:
type: str
label:
description:
- - The instance label. This label is used as the main determiner for idempotence for the module and is therefore mandatory.
+ - The instance label. This label is used as the main determiner for idempotency for the module and is therefore mandatory.
type: str
required: true
group:
@@ -53,7 +53,7 @@ options:
type: str
private_ip:
description:
- - If V(true), the created Linode will have private networking enabled and assigned a private IPv4 address.
+ - If V(true), the created Linode instance has private networking enabled and assigned a private IPv4 address.
type: bool
default: false
version_added: 3.0.0
@@ -65,8 +65,8 @@ options:
elements: str
root_pass:
description:
- - The password for the root user. If not specified, one will be generated. This generated password will be available
- in the task success JSON.
+ - The password for the root user. If not specified, it generates a new one. This generated password is available in
+ the task success JSON.
type: str
authorized_keys:
description:
@@ -128,45 +128,46 @@ instance:
description: The instance description in JSON serialized form.
returned: Always.
type: dict
- sample: {
- "root_pass": "foobar", # if auto-generated
- "alerts": {
- "cpu": 90,
- "io": 10000,
- "network_in": 10,
- "network_out": 10,
- "transfer_quota": 80
- },
- "backups": {
- "enabled": false,
- "schedule": {
- "day": null,
- "window": null
- }
- },
- "created": "2018-09-26T08:12:33",
- "group": "Foobar Group",
- "hypervisor": "kvm",
- "id": 10480444,
- "image": "linode/centos7",
- "ipv4": [
- "130.132.285.233"
- ],
- "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
- "label": "lin-foo",
- "region": "eu-west",
- "specs": {
- "disk": 25600,
- "memory": 1024,
- "transfer": 1000,
- "vcpus": 1
- },
- "status": "running",
- "tags": [],
- "type": "g6-nanode-1",
- "updated": "2018-09-26T10:10:14",
- "watchdog_enabled": true
- }
+ sample:
+ {
+ "root_pass": "foobar", # if auto-generated
+ "alerts": {
+ "cpu": 90,
+ "io": 10000,
+ "network_in": 10,
+ "network_out": 10,
+ "transfer_quota": 80
+ },
+ "backups": {
+ "enabled": false,
+ "schedule": {
+ "day": null,
+ "window": null
+ }
+ },
+ "created": "2018-09-26T08:12:33",
+ "group": "Foobar Group",
+ "hypervisor": "kvm",
+ "id": 10480444,
+ "image": "linode/centos7",
+ "ipv4": [
+ "130.132.285.233"
+ ],
+ "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
+ "label": "lin-foo",
+ "region": "eu-west",
+ "specs": {
+ "disk": 25600,
+ "memory": 1024,
+ "transfer": 1000,
+ "vcpus": 1
+ },
+ "status": "running",
+ "tags": [],
+ "type": "g6-nanode-1",
+ "updated": "2018-09-26T10:10:14",
+ "watchdog_enabled": true
+ }
"""
import traceback
diff --git a/plugins/modules/listen_ports_facts.py b/plugins/modules/listen_ports_facts.py
index 9f9eb66481..a33c78be3c 100644
--- a/plugins/modules/listen_ports_facts.py
+++ b/plugins/modules/listen_ports_facts.py
@@ -20,7 +20,7 @@ requirements:
short_description: Gather facts on processes listening on TCP and UDP ports
notes:
- C(ss) returns all processes for each listen address and port.
- - This plugin will return each of them, so multiple entries for the same listen address and port are likely in results.
+ - This plugin returns each of them, so multiple entries for the same listen address and port are likely in results.
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.facts
@@ -29,7 +29,7 @@ options:
command:
description:
- Override which command to use for fetching listen ports.
- - By default module will use first found supported command on the system (in alphanumerical order).
+ - By default module uses first found supported command on the system (in alphanumerical order).
type: str
choices:
- netstat
@@ -397,7 +397,7 @@ def main():
break
if bin_path is None:
- raise EnvironmentError(msg='Unable to find any of the supported commands in PATH: {0}'.format(", ".join(sorted(commands_map))))
+ raise EnvironmentError('Unable to find any of the supported commands in PATH: {0}'.format(", ".join(sorted(commands_map))))
# which ports are listening for connections?
args = commands_map[command]['args']
diff --git a/plugins/modules/lldp.py b/plugins/modules/lldp.py
index 7f4a820257..018d9fc307 100644
--- a/plugins/modules/lldp.py
+++ b/plugins/modules/lldp.py
@@ -98,7 +98,7 @@ def gather_lldp(module):
def main():
module_args = dict(
- multivalues=dict(type='bool', required=False, default=False)
+ multivalues=dict(type='bool', default=False)
)
module = AnsibleModule(module_args)
diff --git a/plugins/modules/locale_gen.py b/plugins/modules/locale_gen.py
index db9ea191e8..2e1932c204 100644
--- a/plugins/modules/locale_gen.py
+++ b/plugins/modules/locale_gen.py
@@ -37,10 +37,10 @@ options:
choices: [absent, present]
default: present
notes:
- - If C(/etc/locale.gen) exists, the module will assume to be using the B(glibc) mechanism, else if C(/var/lib/locales/supported.d/)
- exists it will assume to be using the B(ubuntu_legacy) mechanism, else it will raise an error.
- - When using glibc mechanism, it will manage locales by editing C(/etc/locale.gen) and running C(locale-gen).
- - When using ubuntu_legacy mechanism, it will manage locales by editing C(/var/lib/locales/supported.d/local) and then running
+ - If C(/etc/locale.gen) exists, the module assumes to be using the B(glibc) mechanism, else if C(/var/lib/locales/supported.d/)
+ exists it assumes to be using the B(ubuntu_legacy) mechanism, else it raises an error.
+ - When using glibc mechanism, it manages locales by editing C(/etc/locale.gen) and running C(locale-gen).
+ - When using ubuntu_legacy mechanism, it manages locales by editing C(/var/lib/locales/supported.d/local) and then running
C(locale-gen).
- Please note that the code path that uses ubuntu_legacy mechanism has not been tested for a while, because Ubuntu is already
using the glibc mechanism. There is no support for that, given our inability to test it. Therefore, that mechanism is
@@ -111,7 +111,6 @@ class LocaleGen(StateModuleHelper):
),
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
self.MECHANISMS = dict(
diff --git a/plugins/modules/logentries.py b/plugins/modules/logentries.py
index 420f054fac..69e83f5e49 100644
--- a/plugins/modules/logentries.py
+++ b/plugins/modules/logentries.py
@@ -139,8 +139,8 @@ def main():
argument_spec=dict(
path=dict(required=True),
state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
- name=dict(required=False, default=None, type='str'),
- logtype=dict(required=False, default=None, type='str', aliases=['type'])
+ name=dict(type='str'),
+ logtype=dict(type='str', aliases=['type'])
),
supports_check_mode=True
)
diff --git a/plugins/modules/logentries_msg.py b/plugins/modules/logentries_msg.py
index dd3b88d624..8b2a7c5155 100644
--- a/plugins/modules/logentries_msg.py
+++ b/plugins/modules/logentries_msg.py
@@ -45,7 +45,7 @@ options:
author: "Jimmy Tang (@jcftang) "
"""
-RETURN = """# """
+RETURN = """#"""
EXAMPLES = r"""
- name: Send a message to logentries
diff --git a/plugins/modules/logstash_plugin.py b/plugins/modules/logstash_plugin.py
index ba7bdc2cc5..afacf7767f 100644
--- a/plugins/modules/logstash_plugin.py
+++ b/plugins/modules/logstash_plugin.py
@@ -49,7 +49,7 @@ options:
version:
type: str
description:
- - Specify plugin Version of the plugin to install. If plugin exists with previous version, it will NOT be updated.
+ - Specify version of the plugin to install. If the plugin exists with a previous version, it is B(not) updated.
"""
EXAMPLES = r"""
diff --git a/plugins/modules/lvg.py b/plugins/modules/lvg.py
index b16cdd87a2..42d4c9182e 100644
--- a/plugins/modules/lvg.py
+++ b/plugins/modules/lvg.py
@@ -33,7 +33,7 @@ options:
description:
- List of comma-separated devices to use as physical devices in this volume group.
- Required when creating or resizing volume group.
- - The module will take care of running pvcreate if needed.
+ - The module runs C(pvcreate) if needed.
- O(remove_extra_pvs) controls whether or not unspecified physical devices are removed from the volume group.
type: list
elements: str
@@ -123,7 +123,9 @@ EXAMPLES = r"""
- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
community.general.lvg:
vg: vg.services
- pvs: /dev/sdb1,/dev/sdc5
+ pvs:
+ - /dev/sdb1
+ - /dev/sdc5
- name: Remove a volume group with name vg.services
community.general.lvg:
@@ -146,6 +148,13 @@ EXAMPLES = r"""
state: active
vg: vg.services
+- name: Add new PVs to volume group without removing existing ones
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb1,/dev/sdc1
+ remove_extra_pvs: false
+ state: present
+
- name: Reset a volume group UUID
community.general.lvg:
state: inactive
@@ -156,7 +165,9 @@ EXAMPLES = r"""
community.general.lvg:
state: inactive
vg: vg.services
- pvs: /dev/sdb1,/dev/sdc5
+ pvs:
+ - /dev/sdb1
+ - /dev/sdc5
reset_vg_uuid: true
reset_pv_uuid: true
"""
diff --git a/plugins/modules/lvm_pv.py b/plugins/modules/lvm_pv.py
new file mode 100644
index 0000000000..15740db8c1
--- /dev/null
+++ b/plugins/modules/lvm_pv.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2025, Klention Mali
+# Based on lvol module by Jeroen Hoekx
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+module: lvm_pv
+short_description: Manage LVM Physical Volumes
+version_added: "11.0.0"
+description:
+ - Creates, resizes or removes LVM Physical Volumes.
+author:
+ - Klention Mali (@klention)
+options:
+ device:
+ description:
+ - Path to the block device to manage.
+ type: path
+ required: true
+ state:
+ description:
+ - Control if the physical volume exists.
+ type: str
+ choices: [present, absent]
+ default: present
+ force:
+ description:
+ - Force the operation.
+ - When O(state=present) (creating a PV), this uses C(pvcreate -f) to force creation.
+ - When O(state=absent) (removing a PV), this uses C(pvremove -ff) to force removal even if part of a volume group.
+ type: bool
+ default: false
+ resize:
+ description:
+ - Resize PV to device size when O(state=present).
+ type: bool
+ default: false
+notes:
+ - Requires LVM2 utilities installed on the target system.
+ - Device path must exist when creating a PV.
+"""
+
+EXAMPLES = r"""
+- name: Creating physical volume on /dev/sdb
+ community.general.lvm_pv:
+ device: /dev/sdb
+
+- name: Creating and resizing (if needed) physical volume
+ community.general.lvm_pv:
+ device: /dev/sdb
+ resize: true
+
+- name: Removing physical volume that is not part of any volume group
+ community.general.lvm_pv:
+ device: /dev/sdb
+ state: absent
+
+- name: Force removing physical volume that is already part of a volume group
+ community.general.lvm_pv:
+ device: /dev/sdb
+ force: true
+ state: absent
+"""
+
+RETURN = r"""
+"""
+
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_pv_status(module, device):
+ """Check if the device is already a PV."""
+ cmd = ['pvs', '--noheadings', '--readonly', device]
+ return module.run_command(cmd)[0] == 0
+
+
+def get_pv_size(module, device):
+ """Get current PV size in bytes."""
+ cmd = ['pvs', '--noheadings', '--nosuffix', '--units', 'b', '-o', 'pv_size', device]
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ return int(out.strip())
+
+
+def rescan_device(module, device):
+ """Perform storage rescan for the device."""
+ # Extract the base device name (e.g., /dev/sdb -> sdb)
+ base_device = os.path.basename(device)
+ rescan_path = "/sys/block/{0}/device/rescan".format(base_device)
+
+ if os.path.exists(rescan_path):
+ try:
+ with open(rescan_path, 'w') as f:
+ f.write('1')
+ return True
+ except IOError as e:
+ module.warn("Failed to rescan device {0}: {1}".format(device, str(e)))
+ return False
+ else:
+ module.warn("Rescan path not found for device {0}".format(device))
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(type='path', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ force=dict(type='bool', default=False),
+ resize=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ device = module.params['device']
+ state = module.params['state']
+ force = module.params['force']
+ resize = module.params['resize']
+ changed = False
+ actions = []
+
+ # Validate device existence for present state
+ if state == 'present' and not os.path.exists(device):
+ module.fail_json(msg="Device %s not found" % device)
+
+ is_pv = get_pv_status(module, device)
+
+ if state == 'present':
+ # Create PV if needed
+ if not is_pv:
+ if module.check_mode:
+ changed = True
+ actions.append('would be created')
+ else:
+ cmd = ['pvcreate']
+ if force:
+ cmd.append('-f')
+ cmd.append(device)
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ changed = True
+ actions.append('created')
+ is_pv = True
+
+ # Handle resizing
+ elif resize and is_pv:
+ if module.check_mode:
+ # In check mode, assume resize would change
+ changed = True
+ actions.append('would be resized')
+ else:
+ # Perform device rescan each time
+ if rescan_device(module, device):
+ actions.append('rescanned')
+ original_size = get_pv_size(module, device)
+ rc, out, err = module.run_command(['pvresize', device], check_rc=True)
+ new_size = get_pv_size(module, device)
+ if new_size != original_size:
+ changed = True
+ actions.append('resized')
+
+ elif state == 'absent':
+ if is_pv:
+ if module.check_mode:
+ changed = True
+ actions.append('would be removed')
+ else:
+ cmd = ['pvremove', '-y']
+ if force:
+ cmd.append('-ff')
+ changed = True
+ cmd.append(device)
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ actions.append('removed')
+
+ # Generate final message
+ if actions:
+ msg = "PV %s: %s" % (device, ', '.join(actions))
+ else:
+ msg = "No changes needed for PV %s" % device
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/lxc_container.py b/plugins/modules/lxc_container.py
index 8d5face301..cbd643efdb 100644
--- a/plugins/modules/lxc_container.py
+++ b/plugins/modules/lxc_container.py
@@ -129,13 +129,13 @@ options:
archive:
description:
- Create an archive of a container.
- - This will create a tarball of the running container.
+ - This creates a tarball of the running container.
type: bool
default: false
archive_path:
description:
- Path the save the archived container.
- - If the path does not exist the archive method will attempt to create it.
+ - If the path does not exist the archive method attempts to create it.
type: path
archive_compression:
choices:
@@ -157,8 +157,8 @@ options:
description:
- Define the state of a container.
- If you clone a container using O(clone_name) the newly cloned container created in a stopped state.
- - The running container will be stopped while the clone operation is happening and upon completion of the clone the
- original container state will be restored.
+ - The running container is stopped while the clone operation is happening and upon completion of the clone the original
+ container state is restored.
type: str
default: started
container_config:
@@ -172,15 +172,15 @@ requirements:
- 'python3-lxc # OS Package'
notes:
- Containers must have a unique name. If you attempt to create a container with a name that already exists in the users
- namespace the module will simply return as "unchanged".
- - The O(container_command) can be used with any state except V(absent). If used with state V(stopped) the container will
- be V(started), the command executed, and then the container V(stopped) again. Likewise if O(state=stopped) and the container
- does not exist it will be first created, V(started), the command executed, and then V(stopped). If you use a C(|) in the
- variable you can use common script formatting within the variable itself. The O(container_command) option will always
- execute as C(bash). When using O(container_command), a log file is created in the C(/tmp/) directory which contains both
- C(stdout) and C(stderr) of any command executed.
- - If O(archive=true) the system will attempt to create a compressed tarball of the running container. The O(archive) option
- supports LVM backed containers and will create a snapshot of the running container when creating the archive.
+ namespace the module simply returns as "unchanged".
+ - The O(container_command) can be used with any state except V(absent). If used with state V(stopped) the container is V(started),
+ the command executed, and then the container V(stopped) again. Likewise if O(state=stopped) and the container does not
+ exist it is first created, V(started), the command executed, and then V(stopped). If you use a C(|) in the variable you
+ can use common script formatting within the variable itself. The O(container_command) option always executes as C(bash).
+ When using O(container_command), a log file is created in the C(/tmp/) directory which contains both C(stdout) and C(stderr)
+ of any command executed.
+ - If O(archive=true) the system attempts to create a compressed tarball of the running container. The O(archive) option
+ supports LVM backed containers and creates a snapshot of the running container when creating the archive.
- If your distro does not have a package for C(python3-lxc), which is a requirement for this module, it can be installed
from source at U(https://github.com/lxc/python3-lxc) or installed using C(pip install lxc).
"""
diff --git a/plugins/modules/lxca_cmms.py b/plugins/modules/lxca_cmms.py
index 8ece67470b..87b0e2e125 100644
--- a/plugins/modules/lxca_cmms.py
+++ b/plugins/modules/lxca_cmms.py
@@ -144,8 +144,8 @@ FUNC_DICT = {
INPUT_ARG_SPEC = dict(
command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
'cmms_by_chassis_uuid']),
- uuid=dict(default=None),
- chassis=dict(default=None)
+ uuid=dict(),
+ chassis=dict()
)
diff --git a/plugins/modules/lxca_nodes.py b/plugins/modules/lxca_nodes.py
index f133671114..91d3337b27 100644
--- a/plugins/modules/lxca_nodes.py
+++ b/plugins/modules/lxca_nodes.py
@@ -175,7 +175,7 @@ INPUT_ARG_SPEC = dict(
'nodes_by_chassis_uuid',
'nodes_status_managed',
'nodes_status_unmanaged']),
- uuid=dict(default=None), chassis=dict(default=None)
+ uuid=dict(), chassis=dict()
)
diff --git a/plugins/modules/lxd_container.py b/plugins/modules/lxd_container.py
index f347df7492..4abec5acaa 100644
--- a/plugins/modules/lxd_container.py
+++ b/plugins/modules/lxd_container.py
@@ -101,9 +101,9 @@ options:
type: str
target:
description:
- - For cluster deployments. Will attempt to create an instance on a target node. If the instance exists elsewhere in
- a cluster, then it will not be replaced or moved. The name should respond to same name of the node you see in C(lxc
- cluster list).
+ - For cluster deployments. It attempts to create an instance on a target node. If the instance exists elsewhere in a
+ cluster, then it is not replaced nor moved. The name should correspond to the same name of the node you see in C(lxc cluster
+ list).
type: str
required: false
version_added: 1.0.0
@@ -134,7 +134,7 @@ options:
type: bool
wait_for_container:
description:
- - If set to V(true), the tasks will wait till the task reports a success status when performing container operations.
+ - If set to V(true), the tasks wait until the task reports a success status when performing container operations.
default: false
type: bool
version_added: 4.4.0
@@ -180,7 +180,7 @@ options:
type: str
notes:
- Instances can be a container or a virtual machine, both of them must have unique name. If you attempt to create an instance
- with a name that already existed in the users namespace the module will simply return as "unchanged".
+ with a name that already existed in the users namespace, the module simply returns as "unchanged".
- There are two ways to run commands inside a container or virtual machine, using the command module or using the ansible
lxd connection plugin bundled in Ansible >= 2.1, the later requires python to be installed in the instance which can be
done with the command module.
@@ -262,7 +262,7 @@ EXAMPLES = r"""
source:
type: image
mode: pull
- # Provides Ubuntu minimal images
+ # Provides Ubuntu minimal images
server: https://cloud-images.ubuntu.com/minimal/releases/
protocol: simplestreams
alias: "22.04"
@@ -393,7 +393,12 @@ addresses:
description: Mapping from the network device name to a list of IPv4 addresses in the instance.
returned: when state is started or restarted
type: dict
- sample: {"eth0": ["10.155.92.191"]}
+ sample:
+ {
+ "eth0": [
+ "10.155.92.191"
+ ]
+ }
old_state:
description: The old state of the instance.
returned: when state is started or restarted
diff --git a/plugins/modules/lxd_profile.py b/plugins/modules/lxd_profile.py
index efdf50ea90..2525889968 100644
--- a/plugins/modules/lxd_profile.py
+++ b/plugins/modules/lxd_profile.py
@@ -57,7 +57,7 @@ options:
new_name:
description:
- A new name of a profile.
- - If this parameter is specified a profile will be renamed to this name.
+ - If this parameter is specified a profile is renamed to this name.
- See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/profiles/profile_post).
required: false
type: str
@@ -113,7 +113,7 @@ options:
type: str
notes:
- Profiles must have a unique name. If you attempt to create a profile with a name that already existed in the users namespace
- the module will simply return as "unchanged".
+ the module simply returns as "unchanged".
"""
EXAMPLES = r"""
diff --git a/plugins/modules/lxd_project.py b/plugins/modules/lxd_project.py
index 98068175aa..20804f8b38 100644
--- a/plugins/modules/lxd_project.py
+++ b/plugins/modules/lxd_project.py
@@ -42,14 +42,14 @@ options:
new_name:
description:
- A new name of a project.
- - If this parameter is specified a project will be renamed to this name.
+ - If this parameter is specified a project is renamed to this name.
- See U(https://documentation.ubuntu.com/lxd/en/latest/api/#/projects/project_post).
required: false
type: str
merge_project:
description:
- Merge the configuration of the present project with the new desired configuration, instead of replacing it. If configuration
- is the same after merged, no change will be made.
+ is the same after merged, no change is made.
required: false
default: false
type: bool
@@ -98,7 +98,7 @@ options:
type: str
notes:
- Projects must have a unique name. If you attempt to create a project with a name that already existed in the users namespace
- the module will simply return as "unchanged".
+ the module simply returns as "unchanged".
"""
EXAMPLES = r"""
diff --git a/plugins/modules/macports.py b/plugins/modules/macports.py
index 3f02eeb411..c328e45904 100644
--- a/plugins/modules/macports.py
+++ b/plugins/modules/macports.py
@@ -280,7 +280,7 @@ def main():
selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'),
state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
upgrade=dict(default=False, type='bool'),
- variant=dict(aliases=["variants"], default=None, type='str')
+ variant=dict(aliases=["variants"], type='str')
)
)
diff --git a/plugins/modules/mail.py b/plugins/modules/mail.py
index 03192e5bf8..7c8bdb69b3 100644
--- a/plugins/modules/mail.py
+++ b/plugins/modules/mail.py
@@ -90,7 +90,7 @@ options:
attach:
description:
- A list of pathnames of files to attach to the message.
- - Attached files will have their content-type set to C(application/octet-stream).
+ - Attached files have their content-type set to C(application/octet-stream).
type: list
elements: path
default: []
@@ -115,12 +115,12 @@ options:
default: plain
secure:
description:
- - If V(always), the connection will only send email if the connection is Encrypted. If the server does not accept the
- encrypted connection it will fail.
- - If V(try), the connection will attempt to setup a secure SSL/TLS session, before trying to send.
- - If V(never), the connection will not attempt to setup a secure SSL/TLS session, before sending.
- - If V(starttls), the connection will try to upgrade to a secure SSL/TLS connection, before sending. If it is unable
- to do so it will fail.
+ - If V(always), the connection only sends email if the connection is encrypted. If the server does not accept the encrypted
+ connection it fails.
+ - If V(try), the connection attempts to setup a secure SSL/TLS session, before trying to send.
+ - If V(never), the connection does not attempt to setup a secure SSL/TLS session, before sending.
+ - If V(starttls), the connection tries to upgrade to a secure SSL/TLS connection, before sending. If it is unable to
+ do so it fails.
type: str
choices: [always, never, starttls, try]
default: try
@@ -137,7 +137,7 @@ options:
message_id_domain:
description:
- The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID).
- - Note that this is only available on Python 3+. On Python 2, this value will be ignored.
+ - Note that this is only available on Python 3+. On Python 2, this value is ignored.
type: str
default: ansible
version_added: 8.2.0
@@ -242,7 +242,7 @@ def main():
password=dict(type='str', no_log=True),
host=dict(type='str', default='localhost'),
port=dict(type='int', default=25),
- ehlohost=dict(type='str', default=None),
+ ehlohost=dict(type='str'),
sender=dict(type='str', default='root', aliases=['from']),
to=dict(type='list', elements='str', default=['root'], aliases=['recipients']),
cc=dict(type='list', elements='str', default=[]),
diff --git a/plugins/modules/make.py b/plugins/modules/make.py
index a574560f7f..57ee525db5 100644
--- a/plugins/modules/make.py
+++ b/plugins/modules/make.py
@@ -48,7 +48,7 @@ options:
params:
description:
- Any extra parameters to pass to make.
- - If the value is empty, only the key will be used. For example, V(FOO:) will produce V(FOO), not V(FOO=).
+ - If the value is empty, only the key is used. For example, V(FOO:) produces V(FOO), not V(FOO=).
type: dict
target:
description:
diff --git a/plugins/modules/manageiq_alerts.py b/plugins/modules/manageiq_alerts.py
index 87fafcf10b..d1b3fdba69 100644
--- a/plugins/modules/manageiq_alerts.py
+++ b/plugins/modules/manageiq_alerts.py
@@ -300,7 +300,7 @@ def main():
expression=dict(type='dict'),
options=dict(type='dict'),
enabled=dict(type='bool'),
- state=dict(required=False, default='present',
+ state=dict(default='present',
choices=['present', 'absent']),
)
# add the manageiq connection arguments to the arguments
diff --git a/plugins/modules/manageiq_group.py b/plugins/modules/manageiq_group.py
index 9781ebfc98..68170ea733 100644
--- a/plugins/modules/manageiq_group.py
+++ b/plugins/modules/manageiq_group.py
@@ -75,8 +75,8 @@ options:
managed_filters_merge_mode:
type: str
description:
- - In merge mode existing categories are kept or updated, new categories are added.
- - In replace mode all categories will be replaced with the supplied O(managed_filters).
+ - In V(merge) mode existing categories are kept or updated, new categories are added.
+ - In V(replace) mode all categories are replaced with the supplied O(managed_filters).
choices: [merge, replace]
default: replace
belongsto_filters:
@@ -570,14 +570,14 @@ def main():
argument_spec = dict(
description=dict(required=True, type='str'),
state=dict(choices=['absent', 'present'], default='present'),
- role_id=dict(required=False, type='int'),
- role=dict(required=False, type='str'),
- tenant_id=dict(required=False, type='int'),
- tenant=dict(required=False, type='str'),
- managed_filters=dict(required=False, type='dict'),
- managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
- belongsto_filters=dict(required=False, type='list', elements='str'),
- belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ role_id=dict(type='int'),
+ role=dict(type='str'),
+ tenant_id=dict(type='int'),
+ tenant=dict(type='str'),
+ managed_filters=dict(type='dict'),
+ managed_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'),
+ belongsto_filters=dict(type='list', elements='str'),
+ belongsto_filters_merge_mode=dict(choices=['merge', 'replace'], default='replace'),
)
# add the manageiq connection arguments to the arguments
argument_spec.update(manageiq_argument_spec())
diff --git a/plugins/modules/manageiq_policies.py b/plugins/modules/manageiq_policies.py
index 6e2ac36a38..247e2dc94c 100644
--- a/plugins/modules/manageiq_policies.py
+++ b/plugins/modules/manageiq_policies.py
@@ -45,8 +45,21 @@ options:
description:
- The type of the resource to which the profile should be [un]assigned.
required: true
- choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service',
- 'service template', 'template', 'tenant', 'user']
+ choices:
+ - provider
+ - host
+ - vm
+ - blueprint
+ - category
+ - cluster
+ - data store
+ - group
+ - resource pool
+ - service
+ - service template
+ - template
+ - tenant
+ - user
resource_name:
type: str
description:
@@ -89,35 +102,36 @@ EXAMPLES = r"""
RETURN = r"""
manageiq_policies:
- description:
- - List current policy_profile and policies for a provider in ManageIQ.
- returned: always
- type: dict
- sample: {
- "changed": false,
- "profiles": [
+ description:
+ - List current policy_profile and policies for a provider in ManageIQ.
+ returned: always
+ type: dict
+ sample:
+ {
+ "changed": false,
+ "profiles": [
+ {
+ "policies": [
{
- "policies": [
- {
- "active": true,
- "description": "OpenSCAP",
- "name": "openscap policy"
- },
- {
- "active": true,
- "description": "Analyse incoming container images",
- "name": "analyse incoming container images"
- },
- {
- "active": true,
- "description": "Schedule compliance after smart state analysis",
- "name": "schedule compliance after smart state analysis"
- }
- ],
- "profile_description": "OpenSCAP profile",
- "profile_name": "openscap profile"
+ "active": true,
+ "description": "OpenSCAP",
+ "name": "openscap policy"
+ },
+ {
+ "active": true,
+ "description": "Analyse incoming container images",
+ "name": "analyse incoming container images"
+ },
+ {
+ "active": true,
+ "description": "Schedule compliance after smart state analysis",
+ "name": "schedule compliance after smart state analysis"
}
- ]
+ ],
+ "profile_description": "OpenSCAP profile",
+ "profile_name": "openscap profile"
+ }
+ ]
}
"""
@@ -133,7 +147,7 @@ def main():
resource_name=dict(type='str'),
resource_type=dict(required=True, type='str',
choices=list(manageiq_entities().keys())),
- state=dict(required=False, type='str',
+ state=dict(type='str',
choices=['present', 'absent'], default='present'),
)
# add the manageiq connection arguments to the arguments
diff --git a/plugins/modules/manageiq_policies_info.py b/plugins/modules/manageiq_policies_info.py
index 4ef51515a6..2db694f11c 100644
--- a/plugins/modules/manageiq_policies_info.py
+++ b/plugins/modules/manageiq_policies_info.py
@@ -29,8 +29,21 @@ options:
description:
- The type of the resource to obtain the profile for.
required: true
- choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service',
- 'service template', 'template', 'tenant', 'user']
+ choices:
+ - provider
+ - host
+ - vm
+ - blueprint
+ - category
+ - cluster
+ - data store
+ - group
+ - resource pool
+ - service
+ - service template
+ - template
+ - tenant
+ - user
resource_name:
type: str
description:
@@ -83,8 +96,8 @@ from ansible_collections.community.general.plugins.module_utils.manageiq import
def main():
argument_spec = dict(
- resource_id=dict(required=False, type='int'),
- resource_name=dict(required=False, type='str'),
+ resource_id=dict(type='int'),
+ resource_name=dict(type='str'),
resource_type=dict(required=True, type='str',
choices=list(manageiq_entities().keys())),
)
diff --git a/plugins/modules/manageiq_provider.py b/plugins/modules/manageiq_provider.py
index 98677c7beb..334555c29a 100644
--- a/plugins/modules/manageiq_provider.py
+++ b/plugins/modules/manageiq_provider.py
@@ -31,7 +31,7 @@ options:
description:
- V(absent) - provider should not exist,
- V(present) - provider should be present,
- - V(refresh) - provider will be refreshed.
+ - V(refresh) - provider is refreshed.
choices: ['absent', 'present', 'refresh']
default: 'present'
name:
@@ -44,7 +44,7 @@ options:
choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE']
zone:
type: str
- description: The ManageIQ zone name that will manage the provider.
+ description: The ManageIQ zone name that manages the provider.
default: 'default'
provider_region:
type: str
@@ -63,7 +63,7 @@ options:
description: Google Compute Engine Project ID.
azure_tenant_id:
type: str
- description: Tenant ID. defaults to None.
+ description: Tenant ID. Defaults to V(null).
aliases: [keystone_v3_domain_id]
tenant_mapping_enabled:
type: bool
@@ -190,25 +190,25 @@ options:
description: The provider's API port.
userid:
type: str
- description: Provider's API endpoint authentication userid. defaults to None.
+ description: Provider's API endpoint authentication userid. Defaults to V(null).
password:
type: str
- description: Provider's API endpoint authentication password. defaults to None.
+ description: Provider's API endpoint authentication password. Defaults to V(null).
auth_key:
type: str
- description: Provider's API endpoint authentication bearer token. defaults to None.
+ description: Provider's API endpoint authentication bearer token. Defaults to V(null).
validate_certs:
type: bool
- description: Whether SSL certificates should be verified for HTTPS requests (deprecated). defaults to True.
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to V(true).
default: true
aliases: [verify_ssl]
security_protocol:
type: str
choices: ['ssl-with-validation', 'ssl-with-validation-custom-ca', 'ssl-without-validation', 'non-ssl']
- description: How SSL certificates should be used for HTTPS requests. defaults to None.
+ description: How SSL certificates should be used for HTTPS requests. Defaults to V(null).
certificate_authority:
type: str
- description: The CA bundle string with custom certificates. defaults to None.
+ description: The CA bundle string with custom certificates. Defaults to V(null).
path:
type: str
description:
diff --git a/plugins/modules/manageiq_tags.py b/plugins/modules/manageiq_tags.py
index f4136d1732..efd135393d 100644
--- a/plugins/modules/manageiq_tags.py
+++ b/plugins/modules/manageiq_tags.py
@@ -45,16 +45,29 @@ options:
description:
- The relevant resource type in manageiq.
required: true
- choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service',
- 'service template', 'template', 'tenant', 'user']
+ choices:
+ - provider
+ - host
+ - vm
+ - blueprint
+ - category
+ - cluster
+ - data store
+ - group
+ - resource pool
+ - service
+ - service template
+ - template
+ - tenant
+ - user
resource_name:
type: str
description:
- - The name of the resource at which tags will be controlled.
+ - The name of the resource at which tags are controlled.
- Must be specified if O(resource_id) is not set. Both options are mutually exclusive.
resource_id:
description:
- - The ID of the resource at which tags will be controlled.
+ - The ID of the resource at which tags are controlled.
- Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
type: int
version_added: 2.2.0
@@ -125,7 +138,7 @@ def main():
resource_name=dict(type='str'),
resource_type=dict(required=True, type='str',
choices=list(manageiq_entities().keys())),
- state=dict(required=False, type='str',
+ state=dict(type='str',
choices=['present', 'absent'], default='present'),
)
# add the manageiq connection arguments to the arguments
diff --git a/plugins/modules/manageiq_tags_info.py b/plugins/modules/manageiq_tags_info.py
index a39f4b84d3..2a742f69c5 100644
--- a/plugins/modules/manageiq_tags_info.py
+++ b/plugins/modules/manageiq_tags_info.py
@@ -27,16 +27,29 @@ options:
description:
- The relevant resource type in ManageIQ.
required: true
- choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster', 'data store', 'group', 'resource pool', 'service',
- 'service template', 'template', 'tenant', 'user']
+ choices:
+ - provider
+ - host
+ - vm
+ - blueprint
+ - category
+ - cluster
+ - data store
+ - group
+ - resource pool
+ - service
+ - service template
+ - template
+ - tenant
+ - user
resource_name:
type: str
description:
- - The name of the resource at which tags will be controlled.
+ - The name of the resource at which tags are controlled.
- Must be specified if O(resource_id) is not set. Both options are mutually exclusive.
resource_id:
description:
- - The ID of the resource at which tags will be controlled.
+ - The ID of the resource at which tags are controlled.
- Must be specified if O(resource_name) is not set. Both options are mutually exclusive.
type: int
"""
diff --git a/plugins/modules/manageiq_tenant.py b/plugins/modules/manageiq_tenant.py
index deb2fc452d..fda97509ce 100644
--- a/plugins/modules/manageiq_tenant.py
+++ b/plugins/modules/manageiq_tenant.py
@@ -482,8 +482,8 @@ def main():
argument_spec = dict(
name=dict(required=True, type='str'),
description=dict(required=True, type='str'),
- parent_id=dict(required=False, type='int'),
- parent=dict(required=False, type='str'),
+ parent_id=dict(type='int'),
+ parent=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
quotas=dict(type='dict', default={})
)
diff --git a/plugins/modules/manageiq_user.py b/plugins/modules/manageiq_user.py
index a4d5c21dfc..475086c823 100644
--- a/plugins/modules/manageiq_user.py
+++ b/plugins/modules/manageiq_user.py
@@ -59,8 +59,8 @@ options:
default: always
choices: ['always', 'on_create']
description:
- - V(always) will update passwords unconditionally.
- - V(on_create) will only set the password for a newly created user.
+ - V(always) updates passwords unconditionally.
+ - V(on_create) only sets the password for a newly created user.
"""
EXAMPLES = r"""
diff --git a/plugins/modules/matrix.py b/plugins/modules/matrix.py
index 186c57dd31..fb6c797bff 100644
--- a/plugins/modules/matrix.py
+++ b/plugins/modules/matrix.py
@@ -13,7 +13,7 @@ author: "Jan Christian Grünhage (@jcgruenhage)"
module: matrix
short_description: Send notifications to matrix
description:
- - This module sends html formatted notifications to matrix rooms.
+ - This module sends HTML formatted notifications to matrix rooms.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -99,9 +99,9 @@ def run_module():
msg_html=dict(type='str', required=True),
room_id=dict(type='str', required=True),
hs_url=dict(type='str', required=True),
- token=dict(type='str', required=False, no_log=True),
- user_id=dict(type='str', required=False),
- password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', no_log=True),
+ user_id=dict(type='str'),
+ password=dict(type='str', no_log=True),
)
result = dict(
diff --git a/plugins/modules/mattermost.py b/plugins/modules/mattermost.py
index ed046e6481..4cb32c1f3b 100644
--- a/plugins/modules/mattermost.py
+++ b/plugins/modules/mattermost.py
@@ -38,7 +38,7 @@ options:
type: str
description:
- Mattermost webhook API key. Log into your Mattermost site, go to Menu -> Integration -> Incoming Webhook -> Add Incoming
- Webhook. This will give you full URL. O(api_key) is the last part. U(http://mattermost.example.com/hooks/API_KEY).
+ Webhook. This gives you a full URL. O(api_key) is the last part. U(http://mattermost.example.com/hooks/API_KEY).
required: true
text:
type: str
@@ -75,8 +75,8 @@ options:
version_added: 10.0.0
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
default: true
type: bool
"""
@@ -137,10 +137,10 @@ def main():
url=dict(type='str', required=True),
api_key=dict(type='str', required=True, no_log=True),
text=dict(type='str'),
- channel=dict(type='str', default=None),
+ channel=dict(type='str'),
username=dict(type='str', default='Ansible'),
icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'),
- priority=dict(type='str', default=None, choices=['important', 'urgent']),
+ priority=dict(type='str', choices=['important', 'urgent']),
validate_certs=dict(default=True, type='bool'),
attachments=dict(type='list', elements='dict'),
),
diff --git a/plugins/modules/maven_artifact.py b/plugins/modules/maven_artifact.py
index a165c5a32a..af3be70f39 100644
--- a/plugins/modules/maven_artifact.py
+++ b/plugins/modules/maven_artifact.py
@@ -16,8 +16,7 @@ module: maven_artifact
short_description: Downloads an Artifact from a Maven Repository
description:
- Downloads an artifact from a maven repository given the maven coordinates provided to the module.
- - Can retrieve snapshots or release versions of the artifact and will resolve the latest available version if one is not
- available.
+ - Can retrieve snapshots or release versions of the artifact and resolve the latest available version if one is not available.
author: "Chris Schmidt (@chrisisbeef)"
requirements:
- lxml
@@ -85,8 +84,8 @@ options:
type: dict
force_basic_auth:
description:
- - C(httplib2), the library used by the URI module only sends authentication information when a webservice responds to an
- initial request with a 401 status. Since some basic auth services do not properly send a 401, logins will fail. This
+ - C(httplib2), the library used by the URI module only sends authentication information when a webservice responds to
+ an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins fail. This
option forces the sending of the Basic authentication header upon initial request.
default: false
type: bool
@@ -110,7 +109,7 @@ options:
default: 10
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be set to V(false) when no other option exists.
+ - If V(false), SSL certificates are not validated. This should only be set to V(false) when no other option exists.
type: bool
default: true
client_cert:
@@ -135,13 +134,13 @@ options:
verify_checksum:
type: str
description:
- - If V(never), the MD5/SHA1 checksum will never be downloaded and verified.
- - If V(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default.
- - If V(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exist, to verify if
- they are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe) downloading
- the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error if the artifact
- has not been cached yet, it may fail unexpectedly. If you still need it, you should consider using V(always) instead
- - if you deal with a checksum, it is better to use it to verify integrity after download.
+ - If V(never), the MD5/SHA1 checksum is never downloaded and verified.
+ - If V(download), the MD5/SHA1 checksum is downloaded and verified only after artifact download. This is the default.
+ - If V(change), the MD5/SHA1 checksum is downloaded and verified if the destination already exists, to verify if they
+ are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe) downloading the artifact,
+ and since some repository software, when acting as a proxy/cache, return a 404 error if the artifact has not been
+ cached yet, it may fail unexpectedly. If you still need it, you should consider using V(always) instead - if you deal
+ with a checksum, it is better to use it to verify integrity after download.
- V(always) combines V(download) and V(change).
required: false
default: 'download'
@@ -149,9 +148,9 @@ options:
checksum_alg:
type: str
description:
- - If V(md5), checksums will use the MD5 algorithm. This is the default.
- - If V(sha1), checksums will use the SHA1 algorithm. This can be used on systems configured to use FIPS-compliant algorithms,
- since MD5 will be blocked on such systems.
+ - If V(md5), checksums use the MD5 algorithm. This is the default.
+ - If V(sha1), checksums use the SHA1 algorithm. This can be used on systems configured to use FIPS-compliant algorithms,
+ since MD5 is blocked on such systems.
default: 'md5'
choices: ['md5', 'sha1']
version_added: 3.2.0
@@ -245,7 +244,6 @@ import tempfile
import traceback
import re
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
from ansible.module_utils.ansible_release import __version__ as ansible_version
from re import match
@@ -623,35 +621,32 @@ def main():
argument_spec=dict(
group_id=dict(required=True),
artifact_id=dict(required=True),
- version=dict(default=None),
- version_by_spec=dict(default=None),
+ version=dict(),
+ version_by_spec=dict(),
classifier=dict(default=''),
extension=dict(default='jar'),
repository_url=dict(default='https://repo1.maven.org/maven2'),
- username=dict(default=None, aliases=['aws_secret_key']),
- password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
+ username=dict(aliases=['aws_secret_key']),
+ password=dict(no_log=True, aliases=['aws_secret_access_key']),
headers=dict(type='dict'),
force_basic_auth=dict(default=False, type='bool'),
state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state
timeout=dict(default=10, type='int'),
dest=dict(type="path", required=True),
- validate_certs=dict(required=False, default=True, type='bool'),
- client_cert=dict(type="path", required=False),
- client_key=dict(type="path", required=False),
- keep_name=dict(required=False, default=False, type='bool'),
- verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']),
- checksum_alg=dict(required=False, default='md5', choices=['md5', 'sha1']),
- unredirected_headers=dict(type='list', elements='str', required=False),
+ validate_certs=dict(default=True, type='bool'),
+ client_cert=dict(type="path"),
+ client_key=dict(type="path"),
+ keep_name=dict(default=False, type='bool'),
+ verify_checksum=dict(default='download', choices=['never', 'download', 'change', 'always']),
+ checksum_alg=dict(default='md5', choices=['md5', 'sha1']),
+ unredirected_headers=dict(type='list', elements='str'),
directory_mode=dict(type='str'),
),
add_file_common_args=True,
mutually_exclusive=([('version', 'version_by_spec')])
)
- if LooseVersion(ansible_version) < LooseVersion("2.12") and module.params['unredirected_headers']:
- module.fail_json(msg="Unredirected Headers parameter provided, but your ansible-core version does not support it. Minimum version is 2.12")
-
- if LooseVersion(ansible_version) >= LooseVersion("2.12") and module.params['unredirected_headers'] is None:
+ if module.params['unredirected_headers'] is None:
# if the user did not supply unredirected params, we use the default, ONLY on ansible core 2.12 and above
module.params['unredirected_headers'] = ['Authorization', 'Cookie']
diff --git a/plugins/modules/memset_dns_reload.py b/plugins/modules/memset_dns_reload.py
index 7781abbf76..cb8ebe9191 100644
--- a/plugins/modules/memset_dns_reload.py
+++ b/plugins/modules/memset_dns_reload.py
@@ -36,9 +36,9 @@ options:
default: false
type: bool
description:
- - Boolean value, if set will poll the reload job's status and return when the job has completed (unless the 30 second
- timeout is reached first). If the timeout is reached then the task will not be marked as failed, but stderr will indicate
- that the polling failed.
+ - If V(true), it polls the reload job's status and returns when the job has completed (unless the 30 second timeout is
+ reached first). If the timeout is reached then the task does not return as failed, but stderr indicates that the polling
+ failed.
"""
EXAMPLES = r"""
@@ -167,7 +167,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, type='str', no_log=True),
- poll=dict(required=False, default=False, type='bool')
+ poll=dict(default=False, type='bool')
),
supports_check_mode=False
)
diff --git a/plugins/modules/memset_server_info.py b/plugins/modules/memset_server_info.py
index 3c0829ce09..59d395a161 100644
--- a/plugins/modules/memset_server_info.py
+++ b/plugins/modules/memset_server_info.py
@@ -74,31 +74,32 @@ memset_api:
description: Details about the firewall group this server is in.
returned: always
type: dict
- sample: {
- "default_outbound_policy": "RETURN",
- "name": "testyaa-fw1",
- "nickname": "testyaa cPanel rules",
- "notes": "",
- "public": false,
- "rules": {
- "51d7db54d39c3544ef7c48baa0b9944f": {
- "action": "ACCEPT",
- "comment": "",
- "dest_ip6s": "any",
- "dest_ips": "any",
- "dest_ports": "any",
- "direction": "Inbound",
- "ip_version": "any",
- "ordering": 2,
- "protocols": "icmp",
- "rule_group_name": "testyaa-fw1",
- "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
- "source_ip6s": "any",
- "source_ips": "any",
- "source_ports": "any"
+ sample:
+ {
+ "default_outbound_policy": "RETURN",
+ "name": "testyaa-fw1",
+ "nickname": "testyaa cPanel rules",
+ "notes": "",
+ "public": false,
+ "rules": {
+ "51d7db54d39c3544ef7c48baa0b9944f": {
+ "action": "ACCEPT",
+ "comment": "",
+ "dest_ip6s": "any",
+ "dest_ips": "any",
+ "dest_ports": "any",
+ "direction": "Inbound",
+ "ip_version": "any",
+ "ordering": 2,
+ "protocols": "icmp",
+ "rule_group_name": "testyaa-fw1",
+ "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
+ "source_ip6s": "any",
+ "source_ips": "any",
+ "source_ports": "any"
+ }
}
}
- }
firewall_type:
description: The type of firewall the server has (for example self-managed, managed).
returned: always
@@ -118,15 +119,16 @@ memset_api:
description: List of dictionaries of all IP addresses assigned to the server.
returned: always
type: list
- sample: [
- {
- "address": "1.2.3.4",
- "bytes_in_today": 1000.0,
- "bytes_in_yesterday": 2000.0,
- "bytes_out_today": 1000.0,
- "bytes_out_yesterday": 2000.0
- }
- ]
+ sample:
+ [
+ {
+ "address": "1.2.3.4",
+ "bytes_in_today": 1000.0,
+ "bytes_in_yesterday": 2000.0,
+ "bytes_out_today": 1000.0,
+ "bytes_out_yesterday": 2000.0
+ }
+ ]
monitor:
description: Whether the server has monitoring enabled.
returned: always
@@ -146,7 +148,7 @@ memset_api:
description: The network zone(s) the server is in.
returned: always
type: list
- sample: ['reading']
+ sample: ["reading"]
nickname:
description: Customer-set nickname for the server.
returned: always
@@ -221,10 +223,14 @@ memset_api:
description: Dictionary of tagged and untagged VLANs this server is in.
returned: always
type: dict
- sample: {
- tagged: [],
- untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
- }
+ sample:
+ {
+ "tagged": [],
+ "untagged": [
+ "testyaa-vlan1",
+ "testyaa-vlan2"
+ ]
+ }
vulnscan:
description: Vulnerability scanning level.
returned: always
diff --git a/plugins/modules/memset_zone.py b/plugins/modules/memset_zone.py
index 2c80503bec..553328909d 100644
--- a/plugins/modules/memset_zone.py
+++ b/plugins/modules/memset_zone.py
@@ -289,8 +289,8 @@ def main():
state=dict(required=True, choices=['present', 'absent'], type='str'),
api_key=dict(required=True, type='str', no_log=True),
name=dict(required=True, aliases=['nickname'], type='str'),
- ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
- force=dict(required=False, default=False, type='bool')
+ ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ force=dict(default=False, type='bool')
),
supports_check_mode=True
)
diff --git a/plugins/modules/memset_zone_record.py b/plugins/modules/memset_zone_record.py
index 7c16ee31eb..fd87c35fa0 100644
--- a/plugins/modules/memset_zone_record.py
+++ b/plugins/modules/memset_zone_record.py
@@ -356,15 +356,15 @@ def main():
global module
module = AnsibleModule(
argument_spec=dict(
- state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
api_key=dict(required=True, type='str', no_log=True),
zone=dict(required=True, type='str'),
type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'),
address=dict(required=True, aliases=['ip', 'data'], type='str'),
- record=dict(required=False, default='', type='str'),
- ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
- priority=dict(required=False, default=0, type='int'),
- relative=dict(required=False, default=False, type='bool')
+ record=dict(default='', type='str'),
+ ttl=dict(default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ priority=dict(default=0, type='int'),
+ relative=dict(default=False, type='bool')
),
supports_check_mode=True
)
diff --git a/plugins/modules/mksysb.py b/plugins/modules/mksysb.py
index d3c9abeac0..7e188ec844 100644
--- a/plugins/modules/mksysb.py
+++ b/plugins/modules/mksysb.py
@@ -77,7 +77,7 @@ options:
storage_path:
type: str
description:
- - Storage path where the mksysb will stored.
+ - Storage path where the mksysb backup is stored.
required: true
use_snapshot:
description:
@@ -96,10 +96,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-changed:
- description: Return changed for mksysb actions as true or false.
- returned: always
- type: bool
msg:
description: Return message regarding the action.
returned: always
@@ -141,7 +137,6 @@ class MkSysB(ModuleHelper):
backup_dmapi_fs=cmd_runner_fmt.as_bool("-A"),
combined_path=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda p, n: ["%s/%s" % (p, n)])),
)
- use_old_vardict = False
def __init_module__(self):
if not os.path.isdir(self.vars.storage_path):
diff --git a/plugins/modules/modprobe.py b/plugins/modules/modprobe.py
index cff77e9558..d5bb6fddbb 100644
--- a/plugins/modules/modprobe.py
+++ b/plugins/modules/modprobe.py
@@ -50,11 +50,11 @@ options:
- Persistency between reboots for configured module.
- This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent
during reboots.
- - If V(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module will be
- loaded on next reboot.
- - If V(absent), will comment out module name from C(/etc/modules-load.d/) and comment out params from C(/etc/modprobe.d/)
- so the module will not be loaded on next reboot.
- - If V(disabled), will not touch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is.
+ - If V(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module is loaded
+ on next reboot.
+ - If V(absent), comments out module name from C(/etc/modules-load.d/) and comments out params from C(/etc/modprobe.d/)
+ so the module is not loaded on next reboot.
+ - If V(disabled), does not touch anything and leaves C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as it is.
- Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar
triggers encoded in the kernel modules themselves instead of configuration like this.
- In fact, most modern kernel modules are prepared for automatic loading already.
diff --git a/plugins/modules/monit.py b/plugins/modules/monit.py
index 65b6c606e9..a10967264c 100644
--- a/plugins/modules/monit.py
+++ b/plugins/modules/monit.py
@@ -35,8 +35,8 @@ options:
type: str
timeout:
description:
- - If there are pending actions for the service monitored by monit, then Ansible will check for up to this many seconds
- to verify the requested action has been performed. Ansible will sleep for five seconds between each check.
+ - If there are pending actions for the service monitored by monit, then it checks for up to this many seconds to verify
+ the requested action has been performed. The module sleeps for five seconds between each check.
default: 300
type: int
author:
diff --git a/plugins/modules/mqtt.py b/plugins/modules/mqtt.py
index 9c610d02c7..b35a257da7 100644
--- a/plugins/modules/mqtt.py
+++ b/plugins/modules/mqtt.py
@@ -44,7 +44,7 @@ options:
type: str
description:
- MQTT client identifier.
- - If not specified, a value C(hostname + pid) will be used.
+ - If not specified, it uses a value C(hostname + pid).
topic:
type: str
description:
@@ -72,22 +72,22 @@ options:
type: path
description:
- The path to the Certificate Authority certificate files that are to be treated as trusted by this client. If this
- is the only option given then the client will operate in a similar manner to a web browser. That is to say it will
- require the broker to have a certificate signed by the Certificate Authorities in ca_certs and will communicate using
- TLS v1, but will not attempt any form of authentication. This provides basic network encryption but may not be sufficient
+ is the only option given then the client operates in a similar manner to a web browser. That is to say it requires
+ the broker to have a certificate signed by the Certificate Authorities in ca_certs and communicates using TLS v1,
+ but does not attempt any form of authentication. This provides basic network encryption but may not be sufficient
depending on how the broker is configured.
aliases: [ca_certs]
client_cert:
type: path
description:
- - The path pointing to the PEM encoded client certificate. If this is not None it will be used as client information
- for TLS based authentication. Support for this feature is broker dependent.
+ - The path pointing to the PEM encoded client certificate. If this is set it is used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
aliases: [certfile]
client_key:
type: path
description:
- - The path pointing to the PEM encoded client private key. If this is not None it will be used as client information
- for TLS based authentication. Support for this feature is broker dependent.
+ - The path pointing to the PEM encoded client private key. If this is set it is used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
aliases: [keyfile]
tls_version:
description:
@@ -162,15 +162,15 @@ def main():
port=dict(default=1883, type='int'),
topic=dict(required=True),
payload=dict(required=True),
- client_id=dict(default=None),
+ client_id=dict(),
qos=dict(default="0", choices=["0", "1", "2"]),
retain=dict(default=False, type='bool'),
- username=dict(default=None),
- password=dict(default=None, no_log=True),
- ca_cert=dict(default=None, type='path', aliases=['ca_certs']),
- client_cert=dict(default=None, type='path', aliases=['certfile']),
- client_key=dict(default=None, type='path', aliases=['keyfile']),
- tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2'])
+ username=dict(),
+ password=dict(no_log=True),
+ ca_cert=dict(type='path', aliases=['ca_certs']),
+ client_cert=dict(type='path', aliases=['certfile']),
+ client_key=dict(type='path', aliases=['keyfile']),
+ tls_version=dict(choices=['tlsv1.1', 'tlsv1.2'])
),
supports_check_mode=True
)
diff --git a/plugins/modules/mssql_db.py b/plugins/modules/mssql_db.py
index e1fc222e71..8a15bfe699 100644
--- a/plugins/modules/mssql_db.py
+++ b/plugins/modules/mssql_db.py
@@ -158,7 +158,7 @@ def main():
login_password=dict(default='', no_log=True),
login_host=dict(required=True),
login_port=dict(default='1433'),
- target=dict(default=None),
+ target=dict(),
autocommit=dict(type='bool', default=False),
state=dict(
default='present', choices=['present', 'absent', 'import'])
diff --git a/plugins/modules/mssql_script.py b/plugins/modules/mssql_script.py
index 872b2ee13d..37bd0853d0 100644
--- a/plugins/modules/mssql_script.py
+++ b/plugins/modules/mssql_script.py
@@ -23,7 +23,7 @@ attributes:
check_mode:
support: partial
details:
- - The script will not be executed in check mode.
+ - The script is not executed in check mode.
diff_mode:
support: none
@@ -64,8 +64,8 @@ options:
version_added: 8.4.0
output:
description:
- - With V(default) each row will be returned as a list of values. See RV(query_results).
- - Output format V(dict) will return dictionary with the column names as keys. See RV(query_results_dict).
+ - With V(default) each row is returned as a list of values. See RV(query_results).
+ - Output format V(dict) returns a dictionary with the column names as keys. See RV(query_results_dict).
- V(dict) requires named columns to be returned by each query otherwise an error is thrown.
choices: ["dict", "default"]
default: 'default'
@@ -170,12 +170,33 @@ query_results:
type: list
elements: list
returned: success and O(output=default)
- sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]]
+ sample:
+ [
+ [
+ [
+ [
+ "Batch 0 - Select 0"
+ ]
+ ],
+ [
+ [
+ "Batch 0 - Select 1"
+ ]
+ ]
+ ],
+ [
+ [
+ [
+ "Batch 1 - Select 0"
+ ]
+ ]
+ ]
+ ]
contains:
queries:
description:
- List of result sets of each query.
- - If a query returns no results, the results of this and all the following queries will not be included in the output.
+ - If a query returns no results, the results of this and all the following queries are not included in the output.
- Use the V(GO) keyword in O(script) to separate queries.
type: list
elements: list
@@ -197,12 +218,33 @@ query_results_dict:
type: list
elements: list
returned: success and O(output=dict)
- sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]]
+ sample:
+ [
+ [
+ [
+ [
+ "Batch 0 - Select 0"
+ ]
+ ],
+ [
+ [
+ "Batch 0 - Select 1"
+ ]
+ ]
+ ],
+ [
+ [
+ [
+ "Batch 1 - Select 0"
+ ]
+ ]
+ ]
+ ]
contains:
queries:
description:
- List of result sets of each query.
- - If a query returns no results, the results of this and all the following queries will not be included in the output.
+ - If a query returns no results, the results of this and all the following queries are not included in the output.
Use V(GO) keyword to separate queries.
type: list
elements: list
@@ -240,7 +282,7 @@ def clean_output(o):
def run_module():
module_args = dict(
- name=dict(required=False, aliases=['db'], default=''),
+ name=dict(aliases=['db'], default=''),
login_user=dict(),
login_password=dict(no_log=True),
login_host=dict(required=True),
diff --git a/plugins/modules/nagios.py b/plugins/modules/nagios.py
index 3c12b85c0b..830a805f87 100644
--- a/plugins/modules/nagios.py
+++ b/plugins/modules/nagios.py
@@ -22,11 +22,6 @@ description:
- The C(nagios) module is not idempotent.
- All actions require the O(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}})
variable to refer to the host the playbook is currently running on.
- - You can specify multiple services at once by separating them with commas, for example O(services=httpd,nfs,puppet).
- - When specifying what service to handle there is a special service value, O(host), which will handle alerts/downtime/acknowledge
- for the I(host itself), for example O(services=host). This keyword may not be given with other services at the same time.
- B(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running
- on it.) To schedule downtime for all services on particular host use keyword "all", for example O(services=all).
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -40,8 +35,20 @@ options:
- Action to take.
- The V(acknowledge) and V(forced_check) actions were added in community.general 1.2.0.
required: true
- choices: ["downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", "silence_nagios",
- "unsilence_nagios", "command", "servicegroup_service_downtime", "servicegroup_host_downtime", "acknowledge", "forced_check"]
+ choices:
+ - downtime
+ - delete_downtime
+ - enable_alerts
+ - disable_alerts
+ - silence
+ - unsilence
+ - silence_nagios
+ - unsilence_nagios
+ - command
+ - servicegroup_service_downtime
+ - servicegroup_host_downtime
+ - acknowledge
+ - forced_check
type: str
host:
description:
@@ -76,6 +83,12 @@ options:
description:
- What to manage downtime/alerts for. Separate multiple services with commas.
- 'B(Required) option when O(action) is one of: V(downtime), V(acknowledge), V(forced_check), V(enable_alerts), V(disable_alerts).'
+ - You can specify multiple services at once by separating them with commas, for example O(services=httpd,nfs,puppet).
+ - When specifying what O(services) to handle there is a special service value, V(host), which handles alerts/downtime/acknowledge
+ for the I(host itself), for example O(services=host). This keyword may not be given with other services at the same
+ time. B(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the
+ services running on it.) To schedule downtime for all O(services) on particular host use keyword V(all), for example
+ O(services=all).
aliases: ["service"]
type: str
servicegroup:
@@ -85,8 +98,8 @@ options:
type: str
command:
description:
- - The raw command to send to nagios, which should not include the submitted time header or the line-feed.
- - B(Required) option when using the V(command) O(action).
+ - The raw command to send to Nagios, which should not include the submitted time header or the line-feed.
+ - B(Required) option when O(action=command).
type: str
author: "Tim Bielawa (@tbielawa)"
diff --git a/plugins/modules/netcup_dns.py b/plugins/modules/netcup_dns.py
index 900eb01e0d..c48e0a2fb2 100644
--- a/plugins/modules/netcup_dns.py
+++ b/plugins/modules/netcup_dns.py
@@ -68,7 +68,7 @@ options:
default: false
description:
- Whether the record should be the only one for that record type and record name. Only use with O(state=present).
- - This will delete all other records with the same record name and type.
+ - This deletes all other records with the same record name and type.
priority:
description:
- Record priority. Required for O(type=MX).
@@ -213,15 +213,15 @@ def main():
customer_id=dict(required=True, type='int'),
domain=dict(required=True),
- record=dict(required=False, default='@', aliases=['name']),
+ record=dict(default='@', aliases=['name']),
type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT',
'TLSA', 'NS', 'DS', 'OPENPGPKEY', 'SMIMEA',
'SSHFP']),
value=dict(required=True),
- priority=dict(required=False, type='int'),
- solo=dict(required=False, type='bool', default=False),
- state=dict(required=False, choices=['present', 'absent'], default='present'),
- timeout=dict(required=False, type='int', default=5),
+ priority=dict(type='int'),
+ solo=dict(type='bool', default=False),
+ state=dict(choices=['present', 'absent'], default='present'),
+ timeout=dict(type='int', default=5),
),
supports_check_mode=True
diff --git a/plugins/modules/newrelic_deployment.py b/plugins/modules/newrelic_deployment.py
index b9ce8af586..af58402a44 100644
--- a/plugins/modules/newrelic_deployment.py
+++ b/plugins/modules/newrelic_deployment.py
@@ -62,8 +62,8 @@ options:
required: false
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
required: false
default: true
type: bool
@@ -102,14 +102,14 @@ def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True, no_log=True),
- app_name=dict(required=False),
- application_id=dict(required=False),
- changelog=dict(required=False),
- description=dict(required=False),
+ app_name=dict(),
+ application_id=dict(),
+ changelog=dict(),
+ description=dict(),
revision=dict(required=True),
- user=dict(required=False),
+ user=dict(),
validate_certs=dict(default=True, type='bool'),
- app_name_exact_match=dict(required=False, type='bool', default=False),
+ app_name_exact_match=dict(type='bool', default=False),
),
required_one_of=[['app_name', 'application_id']],
required_if=[('app_name_exact_match', True, ['app_name'])],
diff --git a/plugins/modules/nexmo.py b/plugins/modules/nexmo.py
index ef6502532d..2d3a62b053 100644
--- a/plugins/modules/nexmo.py
+++ b/plugins/modules/nexmo.py
@@ -45,12 +45,12 @@ options:
msg:
type: str
description:
- - Message to text to send. Messages longer than 160 characters will be split into multiple messages.
+ - Message text to send. Messages longer than 160 characters are split into multiple messages.
required: true
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
extends_documentation_fragment:
diff --git a/plugins/modules/nictagadm.py b/plugins/modules/nictagadm.py
index a02a8fcffd..07b17bcf9e 100644
--- a/plugins/modules/nictagadm.py
+++ b/plugins/modules/nictagadm.py
@@ -35,7 +35,7 @@ options:
type: str
etherstub:
description:
- - Specifies that the nic tag will be attached to a created O(etherstub).
+ - Specifies that the nic tag is attached to a created O(etherstub).
- Parameter O(etherstub) is mutually exclusive with both O(mtu), and O(mac).
type: bool
default: false
@@ -46,7 +46,7 @@ options:
type: int
force:
description:
- - When O(state=absent) this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs.
+ - When O(state=absent) this switch uses the C(-f) parameter and deletes the nic tag regardless of existing VMs.
type: bool
default: false
state:
@@ -83,7 +83,7 @@ mac:
type: str
sample: 00:1b:21:a3:f5:4d
etherstub:
- description: Specifies if the nic tag will create and attach to an etherstub.
+ description: Specifies if the nic tag was created and attached to an etherstub.
returned: always
type: bool
sample: false
@@ -93,7 +93,7 @@ mtu:
type: int
sample: 1500
force:
- description: Shows if -f was used during the deletion of a nic tag.
+ description: Shows if C(-f) was used during the deletion of a nic tag.
returned: always
type: bool
sample: false
diff --git a/plugins/modules/nmcli.py b/plugins/modules/nmcli.py
index 751f9dd948..0d35e5aacc 100644
--- a/plugins/modules/nmcli.py
+++ b/plugins/modules/nmcli.py
@@ -21,10 +21,10 @@ extends_documentation_fragment:
description:
- Manage the network devices. Create, modify and manage various connection and device type, for example V(ethernet), V(team),
V(bond), V(vlan) and so on.
- - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.'
- - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.'
- - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager.'
- - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.'
+ - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: C(NetworkManager).'
+ - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: C(NetworkManager-tui).'
+ - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: C(network-manager).'
+ - 'On openSUSE, the requirements can be met by installing the following packages: C(NetworkManager).'
attributes:
check_mode:
support: full
@@ -34,8 +34,8 @@ options:
state:
description:
- Whether the device should exist or not, taking action if the state is different from what is stated.
- - Using O(state=present) to create connection will automatically bring connection up.
- - Using O(state=up) and O(state=down) will not modify connection with other parameters. These states have been added
+ - Using O(state=present) creates connection set to be brought up automatically.
+ - Using O(state=up) and O(state=down) does not modify connection with other parameters. These states have been added
in community.general 9.5.0.
type: str
required: true
@@ -46,6 +46,16 @@ options:
- Whether the connection profile can be automatically activated.
type: bool
default: true
+ autoconnect_priority:
+ description:
+ - The priority of the connection profile for autoconnect. If set, connection profiles with higher priority are preferred.
+ type: int
+ version_added: 11.0.0
+ autoconnect_retries:
+ description:
+ - The number of times to retry autoconnecting.
+ type: int
+ version_added: 11.0.0
conn_name:
description:
- The name used to call the connection. Pattern is V([-][-]).
@@ -61,9 +71,9 @@ options:
ifname:
description:
- The interface to bind the connection to.
- - The connection will only be applicable to this interface name.
+ - The connection is only applicable to this interface name.
- A special value of V(*) can be used for interface-independent connections.
- - The ifname argument is mandatory for all connection types except bond, team, bridge, vlan and vpn.
+ - The O(ifname) argument is mandatory for all connection types except bond, team, bridge, vlan and vpn.
- This parameter defaults to O(conn_name) when left unset for all connection types except vpn that removes it.
type: str
type:
@@ -85,8 +95,32 @@ options:
- If you want to control non-ethernet connection attached to V(bond), V(bridge), or V(team) consider using O(slave_type)
option.
type: str
- choices: [bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, macvlan, sit, team,
- team-slave, vlan, vxlan, wifi, gsm, wireguard, ovs-bridge, ovs-port, ovs-interface, vpn, vrf, loopback]
+ choices:
+ - bond
+ - bond-slave
+ - bridge
+ - bridge-slave
+ - dummy
+ - ethernet
+ - generic
+ - gre
+ - infiniband
+ - ipip
+ - macvlan
+ - sit
+ - team
+ - team-slave
+ - vlan
+ - vxlan
+ - wifi
+ - gsm
+ - wireguard
+ - ovs-bridge
+ - ovs-port
+ - ovs-interface
+ - vpn
+ - vrf
+ - loopback
mode:
description:
- This is the type of device or network connection that you wish to create for a bond or bridge.
@@ -467,7 +501,7 @@ options:
runner_fast_rate:
description:
- Option specifies the rate at which our link partner is asked to transmit LACPDU packets. If this is V(true) then packets
- will be sent once per second. Otherwise they will be sent every 30 seconds.
+ are sent once per second. Otherwise they are sent every 30 seconds.
- Only allowed for O(runner=lacp).
type: bool
version_added: 6.5.0
@@ -561,7 +595,7 @@ options:
- Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection.
- One of V(0) (use global default value), V(1) (disable FILS), V(2) (enable FILS if the supplicant and the access
point support it) or V(3) (enable FILS and fail if not supported).
- - When set to V(0) and no global default is set, FILS will be optionally enabled.
+ - When set to V(0) and no global default is set, FILS is optionally enabled.
type: int
choices: [0, 1, 2, 3]
default: 0
@@ -605,7 +639,7 @@ options:
- Indicates whether Protected Management Frames (802.11w) must be enabled for the connection.
- One of V(0) (use global default value), V(1) (disable PMF), V(2) (enable PMF if the supplicant and the access
point support it) or V(3) (enable PMF and fail if not supported).
- - When set to V(0) and no global default is set, PMF will be optionally enabled.
+ - When set to V(0) and no global default is set, PMF is optionally enabled.
type: int
choices: [0, 1, 2, 3]
default: 0
@@ -638,8 +672,8 @@ options:
description:
- Controls the interpretation of WEP keys.
- Allowed values are V(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or
- 13-character ASCII password; or V(2), in which case the passphrase is provided as a string and will be hashed
- using the de-facto MD5 method to derive the actual WEP key.
+ 13-character ASCII password; or V(2), in which case the passphrase is provided as a string and it is hashed using
+ the de-facto MD5 method to derive the actual WEP key.
type: int
choices: [1, 2]
wep-key0:
@@ -674,8 +708,8 @@ options:
wps-method:
description:
- Flags indicating which mode of WPS is to be used if any.
- - There is little point in changing the default setting as NetworkManager will automatically determine whether it
- is feasible to start WPS enrollment from the Access Point capabilities.
+ - There is little point in changing the default setting as NetworkManager automatically determines whether it is
+ feasible to start WPS enrollment from the Access Point capabilities.
- WPS can be disabled by setting this property to a value of V(1).
type: int
default: 0
@@ -719,8 +753,8 @@ options:
description:
- 802.11 frequency band of the network.
- One of V(a) for 5GHz 802.11a or V(bg) for 2.4GHz 802.11.
- - This will lock associations to the Wi-Fi network to the specific band, so for example, if V(a) is specified, the
- device will not associate with the same network in the 2.4GHz band even if the network's settings are compatible.
+ - This locks associations to the Wi-Fi network to the specific band, so for example, if V(a) is specified, the device
+ does not associate with the same network in the 2.4GHz band even if the network's settings are compatible.
- This setting depends on specific driver capability and may not work with all drivers.
type: str
choices: [a, bg]
@@ -733,7 +767,7 @@ options:
channel:
description:
- Wireless channel to use for the Wi-Fi connection.
- - The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel.
+ - The device only joins (or creates for Ad-Hoc networks) a Wi-Fi network on the specified channel.
- Because channel numbers overlap between bands, this property also requires the O(wifi.band) property to be set.
type: int
default: 0
@@ -748,7 +782,7 @@ options:
- With O(wifi.cloned-mac-address) setting V(random) or V(stable), by default all bits of the MAC address are scrambled
and a locally-administered, unicast MAC address is created. This property allows to specify that certain bits
are fixed.
- - Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address.
+ - Note that the least significant bit of the first MAC address is always unset to create a unicast MAC address.
- If the property is V(null), it is eligible to be overwritten by a default connection setting.
- If the value is still V(null) or an empty string, the default is to create a locally-administered, unicast MAC
address.
@@ -758,12 +792,12 @@ options:
3 bytes using the V(random) or V(stable) algorithm.
- If the value contains one additional MAC address after the mask, this address is used instead of the current MAC
address to fill the bits that shall not be randomized.
- - For example, a value of V(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28,
- while the lower bits are randomized.
- - A value of V(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in
- MAC address.
+ - For example, a value of V(FE:FF:FF:00:00:00 68:F7:28:00:00:00) sets the OUI of the MAC address to 68:F7:28, while
+ the lower bits are randomized.
+ - A value of V(02:00:00:00:00:00 00:00:00:00:00:00) creates a fully scrambled globally-administered, burned-in MAC
+ address.
- If the value contains more than one additional MAC addresses, one of them is chosen randomly. For example, V(02:00:00:00:00:00
- 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally administered.
+ 00:00:00:00:00:00 02:00:00:00:00:00) creates a fully scrambled MAC address, randomly locally or globally administered.
type: str
hidden:
description:
@@ -793,7 +827,7 @@ options:
choices: [0, 1, 2]
mac-address:
description:
- - If specified, this connection will only apply to the Wi-Fi device whose permanent MAC address matches.
+ - If specified, this connection only applies to the Wi-Fi device whose permanent MAC address matches.
- This property does not change the MAC address of the device (for example for MAC spoofing).
type: str
mode:
@@ -862,25 +896,25 @@ options:
apn:
description:
- The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network.
- - The APN often determines how the user will be billed for their network usage and whether the user has access to
- the Internet or just a provider-specific walled-garden, so it is important to use the correct APN for the user's
- mobile broadband plan.
+ - The APN often determines how the user is billed for their network usage and whether the user has access to the
+ Internet or just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile
+ broadband plan.
- The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9.
type: str
auto-config:
- description: When V(true), the settings such as O(gsm.apn), O(gsm.username), or O(gsm.password) will default to values
- that match the network the modem will register to in the Mobile Broadband Provider database.
+ description: When V(true), the settings such as O(gsm.apn), O(gsm.username), or O(gsm.password) default to values
+ that match the network the modem registers to in the Mobile Broadband Provider database.
type: bool
default: false
device-id:
description:
- The device unique identifier (as given by the V(WWAN) management service) which this connection applies to.
- - If given, the connection will only apply to the specified device.
+ - If given, the connection only applies to the specified device.
type: str
home-only:
description:
- - When V(true), only connections to the home network will be allowed.
- - Connections to roaming networks will not be made.
+ - When V(true), only connections to the home network are allowed.
+ - Connections to roaming networks are not made.
type: bool
default: false
mtu:
@@ -891,7 +925,7 @@ options:
network-id:
description:
- The Network ID (GSM LAI format, ie MCC-MNC) to force specific network registration.
- - If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified
+ - If the Network ID is specified, NetworkManager attempts to force the device to register only on the specified
network.
- This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise
possible.
@@ -910,7 +944,7 @@ options:
- NMSettingSecretFlags indicating how to handle the O(gsm.password) property.
- 'Following choices are allowed: V(0) B(NONE): The system is responsible for providing and storing this secret
(default), V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when
- it is required agents will be asked to retrieve it V(2) B(NOT_SAVED): This secret should not be saved, but should
+ it is required agents are asked to retrieve it V(2) B(NOT_SAVED): This secret should not be saved, but should
be requested from the user each time it is needed V(4) B(NOT_REQUIRED): In situations where it cannot be automatically
determined that the secret is required (some VPNs and PPP providers do not require all secrets) this flag indicates
that the specific secret is not required.'
@@ -932,14 +966,14 @@ options:
sim-id:
description:
- The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to.
- - If given, the connection will apply to any device also allowed by O(gsm.device-id) which contains a SIM card matching
+ - If given, the connection applies to any device also allowed by O(gsm.device-id) which contains a SIM card matching
the given identifier.
type: str
sim-operator-id:
description:
- A MCC/MNC string like V(310260) or V(21601I) identifying the specific mobile network operator which this connection
applies to.
- - If given, the connection will apply to any device also allowed by O(gsm.device-id) and O(gsm.sim-id) which contains
+ - If given, the connection applies to any device also allowed by O(gsm.device-id) and O(gsm.sim-id) which contains
a SIM card provisioned by the given operator.
type: str
username:
@@ -998,8 +1032,8 @@ options:
ip4-auto-default-route:
description:
- Whether to enable special handling of the IPv4 default route.
- - If enabled, the IPv4 default route from O(wireguard.peer-routes) will be placed to a dedicated routing-table and
- two policy routing rules will be added.
+ - If enabled, the IPv4 default route from O(wireguard.peer-routes) is placed to a dedicated routing-table and two
+ policy routing rules are added.
- The fwmark number is also used as routing-table for the default-route, and if fwmark is zero, an unused fwmark/table
is chosen automatically. This corresponds to what wg-quick does with Table=auto and what WireGuard calls "Improved
Rule-based Routing".
@@ -1009,7 +1043,7 @@ options:
- Like O(wireguard.ip4-auto-default-route), but for the IPv6 default route.
type: bool
listen-port:
- description: The WireGuard connection listen-port. If not specified, the port will be chosen randomly when the interface
+ description: The WireGuard connection listen-port. If not specified, the port is chosen randomly when the interface
comes up.
type: int
mtu:
@@ -1022,12 +1056,12 @@ options:
peer-routes:
description:
- Whether to automatically add routes for the AllowedIPs ranges of the peers.
- - If V(true) (the default), NetworkManager will automatically add routes in the routing tables according to C(ipv4.route-table)
+ - If V(true) (the default), NetworkManager automatically adds routes in the routing tables according to C(ipv4.route-table)
and C(ipv6.route-table). Usually you want this automatism enabled.
- If V(false), no such routes are added automatically. In this case, the user may want to configure static routes
in C(ipv4.routes) and C(ipv6.routes), respectively.
- Note that if the peer's AllowedIPs is V(0.0.0.0/0) or V(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default)
- setting is enabled, the peer route for this peer will not be added automatically.
+ setting is enabled, the peer route for this peer is not added automatically.
type: bool
private-key:
description: The 256 bit private-key in base64 encoding.
@@ -1045,7 +1079,7 @@ options:
version_added: 5.1.0
suboptions:
permissions:
- description: User that will have permission to use the connection.
+ description: User that has permission to use the connection.
type: str
required: true
service-type:
@@ -1062,7 +1096,7 @@ options:
- NMSettingSecretFlags indicating how to handle the C(vpn.password) property.
- 'Following choices are allowed: V(0) B(NONE): The system is responsible for providing and storing this secret
(default); V(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when
- it is required agents will be asked to retrieve it; V(2) B(NOT_SAVED): This secret should not be saved, but should
+ it is required agents are asked to retrieve it; V(2) B(NOT_SAVED): This secret should not be saved, but should
be requested from the user each time it is needed; V(4) B(NOT_REQUIRED): In situations where it cannot be automatically
determined that the secret is required (some VPNs and PPP providers do not require all secrets) this flag indicates
that the specific secret is not required.'
@@ -1081,7 +1115,8 @@ options:
ipsec-psk:
description:
- The pre-shared key in base64 encoding.
- - "You can encode using this Ansible jinja2 expression: V(\"0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}\")."
+ - >
+ You can encode using this Ansible Jinja2 expression: V("0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}").
- This is only used when O(vpn.ipsec-enabled=true).
type: str
sriov:
@@ -1392,7 +1427,7 @@ EXAMPLES = r"""
community.general.nmcli:
conn_name: my-eth1
state: up
- reload: true
+ conn_reload: true
- name: Add second ip4 address
community.general.nmcli:
@@ -1688,6 +1723,8 @@ class Nmcli(object):
self.state = module.params['state']
self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions']
self.autoconnect = module.params['autoconnect']
+ self.autoconnect_priority = module.params['autoconnect_priority']
+ self.autoconnect_retries = module.params['autoconnect_retries']
self.conn_name = module.params['conn_name']
self.conn_reload = module.params['conn_reload']
self.slave_type = module.params['slave_type']
@@ -1803,10 +1840,7 @@ class Nmcli(object):
self.module.fail_json(msg="'master' option is required when 'slave_type' is specified.")
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
- if isinstance(cmd, list):
- cmd = [to_text(item) for item in cmd]
- else:
- cmd = to_text(cmd)
+ cmd = [to_text(item) for item in cmd]
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def execute_edit_commands(self, commands, arguments):
@@ -1819,6 +1853,8 @@ class Nmcli(object):
# Options common to multiple connection types.
options = {
'connection.autoconnect': self.autoconnect,
+ 'connection.autoconnect-priority': self.autoconnect_priority,
+ 'connection.autoconnect-retries': self.autoconnect_retries,
'connection.zone': self.zone,
}
@@ -2231,7 +2267,7 @@ class Nmcli(object):
@staticmethod
def settings_type(setting):
- if setting in ('bridge.stp',
+ if setting in {'bridge.stp',
'bridge-port.hairpin-mode',
'connection.autoconnect',
'ipv4.never-default',
@@ -2241,9 +2277,9 @@ class Nmcli(object):
'ipv6.ignore-auto-dns',
'ipv6.ignore-auto-routes',
'802-11-wireless.hidden',
- 'team.runner-fast-rate'):
+ 'team.runner-fast-rate'}:
return bool
- elif setting in ('ipv4.addresses',
+ elif setting in {'ipv4.addresses',
'ipv6.addresses',
'ipv4.dns',
'ipv4.dns-search',
@@ -2260,8 +2296,11 @@ class Nmcli(object):
'802-11-wireless-security.proto',
'802-11-wireless-security.psk-flags',
'802-11-wireless-security.wep-key-flags',
- '802-11-wireless.mac-address-blacklist'):
+ '802-11-wireless.mac-address-blacklist'}:
return list
+ elif setting in {'connection.autoconnect-priority',
+ 'connection.autoconnect-retries'}:
+ return int
return str
def get_route_params(self, raw_values):
@@ -2443,7 +2482,7 @@ class Nmcli(object):
for line in out.splitlines():
prefix = '%s.' % setting
- if (line.startswith(prefix)):
+ if line.startswith(prefix):
pair = line.split(':', 1)
property = pair[0].strip().replace(prefix, '')
properties.append(property)
@@ -2571,6 +2610,8 @@ def main():
argument_spec=dict(
ignore_unsupported_suboptions=dict(type='bool', default=False),
autoconnect=dict(type='bool', default=True),
+ autoconnect_priority=dict(type='int'),
+ autoconnect_retries=dict(type='int'),
state=dict(type='str', required=True, choices=['absent', 'present', 'up', 'down']),
conn_name=dict(type='str', required=True),
conn_reload=dict(type='bool', default=False),
@@ -2725,7 +2766,11 @@ def main():
mutually_exclusive=[['never_default4', 'gw4'],
['routes4_extended', 'routes4'],
['routes6_extended', 'routes6']],
- required_if=[("type", "wifi", [("ssid")])],
+ required_if=[
+ ("type", "wifi", ["ssid"]),
+ ("type", "team-slave", ["master", "ifname"]),
+ ("slave_type", "team", ["master", "ifname"]),
+ ],
supports_check_mode=True,
)
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
@@ -2735,21 +2780,12 @@ def main():
(rc, out, err) = (None, '', '')
result = {'conn_name': nmcli.conn_name, 'state': nmcli.state}
- # check for issues
- if nmcli.conn_name is None:
- nmcli.module.fail_json(msg="Please specify a name for the connection")
# team checks
if nmcli.type == "team":
if nmcli.runner_hwaddr_policy and not nmcli.runner == "activebackup":
nmcli.module.fail_json(msg="Runner-hwaddr-policy is only allowed for runner activebackup")
if nmcli.runner_fast_rate is not None and nmcli.runner != "lacp":
nmcli.module.fail_json(msg="runner-fast-rate is only allowed for runner lacp")
- # team-slave checks
- if nmcli.type == 'team-slave' or nmcli.slave_type == 'team':
- if nmcli.master is None:
- nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type)
- if nmcli.ifname is None:
- nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type)
if nmcli.type == 'wifi':
unsupported_properties = {}
if nmcli.wifi:
@@ -2772,7 +2808,7 @@ def main():
(rc, out, err) = nmcli.down_connection()
(rc, out, err) = nmcli.remove_connection()
if rc != 0:
- module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+ module.fail_json(name=('Error removing connection named %s' % nmcli.conn_name), msg=err, rc=rc)
elif nmcli.state == 'present':
if nmcli.connection_exists():
@@ -2809,7 +2845,7 @@ def main():
(rc, out, err) = nmcli.reload_connection()
(rc, out, err) = nmcli.up_connection()
if rc != 0:
- module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+ module.fail_json(name=('Error bringing up connection named %s' % nmcli.conn_name), msg=err, rc=rc)
elif nmcli.state == 'down':
if nmcli.connection_exists():
@@ -2819,7 +2855,7 @@ def main():
(rc, out, err) = nmcli.reload_connection()
(rc, out, err) = nmcli.down_connection()
if rc != 0:
- module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+ module.fail_json(name=('Error bringing down connection named %s' % nmcli.conn_name), msg=err, rc=rc)
except NmcliModuleError as e:
module.fail_json(name=nmcli.conn_name, msg=str(e))
diff --git a/plugins/modules/nomad_job_info.py b/plugins/modules/nomad_job_info.py
index 0a5c81cf15..b3703b64ce 100644
--- a/plugins/modules/nomad_job_info.py
+++ b/plugins/modules/nomad_job_info.py
@@ -49,219 +49,219 @@ EXAMPLES = r"""
RETURN = r"""
result:
- description: List with dictionary contains jobs info
- returned: success
- type: list
- sample: [
- {
+ description: List with dictionaries containing jobs info.
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "Affinities": null,
+ "AllAtOnce": false,
+ "Constraints": null,
+ "ConsulToken": "",
+ "CreateIndex": 13,
+ "Datacenters": [
+ "dc1"
+ ],
+ "Dispatched": false,
+ "ID": "example",
+ "JobModifyIndex": 13,
+ "Meta": null,
+ "ModifyIndex": 13,
+ "Multiregion": null,
+ "Name": "example",
+ "Namespace": "default",
+ "NomadTokenID": "",
+ "ParameterizedJob": null,
+ "ParentID": "",
+ "Payload": null,
+ "Periodic": null,
+ "Priority": 50,
+ "Region": "global",
+ "Spreads": null,
+ "Stable": false,
+ "Status": "pending",
+ "StatusDescription": "",
+ "Stop": false,
+ "SubmitTime": 1602244370615307000,
+ "TaskGroups": [
+ {
"Affinities": null,
- "AllAtOnce": false,
"Constraints": null,
- "ConsulToken": "",
- "CreateIndex": 13,
- "Datacenters": [
- "dc1"
- ],
- "Dispatched": false,
- "ID": "example",
- "JobModifyIndex": 13,
- "Meta": null,
- "ModifyIndex": 13,
- "Multiregion": null,
- "Name": "example",
- "Namespace": "default",
- "NomadTokenID": "",
- "ParameterizedJob": null,
- "ParentID": "",
- "Payload": null,
- "Periodic": null,
- "Priority": 50,
- "Region": "global",
- "Spreads": null,
- "Stable": false,
- "Status": "pending",
- "StatusDescription": "",
- "Stop": false,
- "SubmitTime": 1602244370615307000,
- "TaskGroups": [
- {
- "Affinities": null,
- "Constraints": null,
- "Count": 1,
- "EphemeralDisk": {
- "Migrate": false,
- "SizeMB": 300,
- "Sticky": false
- },
- "Meta": null,
- "Migrate": {
- "HealthCheck": "checks",
- "HealthyDeadline": 300000000000,
- "MaxParallel": 1,
- "MinHealthyTime": 10000000000
- },
- "Name": "cache",
- "Networks": null,
- "ReschedulePolicy": {
- "Attempts": 0,
- "Delay": 30000000000,
- "DelayFunction": "exponential",
- "Interval": 0,
- "MaxDelay": 3600000000000,
- "Unlimited": true
- },
- "RestartPolicy": {
- "Attempts": 3,
- "Delay": 15000000000,
- "Interval": 1800000000000,
- "Mode": "fail"
- },
- "Scaling": null,
- "Services": null,
- "ShutdownDelay": null,
- "Spreads": null,
- "StopAfterClientDisconnect": null,
- "Tasks": [
- {
- "Affinities": null,
- "Artifacts": null,
- "CSIPluginConfig": null,
- "Config": {
- "image": "redis:3.2",
- "port_map": [
- {
- "db": 6379.0
- }
- ]
- },
- "Constraints": null,
- "DispatchPayload": null,
- "Driver": "docker",
- "Env": null,
- "KillSignal": "",
- "KillTimeout": 5000000000,
- "Kind": "",
- "Leader": false,
- "Lifecycle": null,
- "LogConfig": {
- "MaxFileSizeMB": 10,
- "MaxFiles": 10
- },
- "Meta": null,
- "Name": "redis",
- "Resources": {
- "CPU": 500,
- "Devices": null,
- "DiskMB": 0,
- "IOPS": 0,
- "MemoryMB": 256,
- "Networks": [
- {
- "CIDR": "",
- "DNS": null,
- "Device": "",
- "DynamicPorts": [
- {
- "HostNetwork": "default",
- "Label": "db",
- "To": 0,
- "Value": 0
- }
- ],
- "IP": "",
- "MBits": 10,
- "Mode": "",
- "ReservedPorts": null
- }
- ]
- },
- "RestartPolicy": {
- "Attempts": 3,
- "Delay": 15000000000,
- "Interval": 1800000000000,
- "Mode": "fail"
- },
- "Services": [
- {
- "AddressMode": "auto",
- "CanaryMeta": null,
- "CanaryTags": null,
- "Checks": [
- {
- "AddressMode": "",
- "Args": null,
- "CheckRestart": null,
- "Command": "",
- "Expose": false,
- "FailuresBeforeCritical": 0,
- "GRPCService": "",
- "GRPCUseTLS": false,
- "Header": null,
- "InitialStatus": "",
- "Interval": 10000000000,
- "Method": "",
- "Name": "alive",
- "Path": "",
- "PortLabel": "",
- "Protocol": "",
- "SuccessBeforePassing": 0,
- "TLSSkipVerify": false,
- "TaskName": "",
- "Timeout": 2000000000,
- "Type": "tcp"
- }
- ],
- "Connect": null,
- "EnableTagOverride": false,
- "Meta": null,
- "Name": "redis-cache",
- "PortLabel": "db",
- "Tags": [
- "global",
- "cache"
- ],
- "TaskName": ""
- }
- ],
- "ShutdownDelay": 0,
- "Templates": null,
- "User": "",
- "Vault": null,
- "VolumeMounts": null
- }
- ],
- "Update": {
- "AutoPromote": false,
- "AutoRevert": false,
- "Canary": 0,
- "HealthCheck": "checks",
- "HealthyDeadline": 180000000000,
- "MaxParallel": 1,
- "MinHealthyTime": 10000000000,
- "ProgressDeadline": 600000000000,
- "Stagger": 30000000000
- },
- "Volumes": null
- }
- ],
- "Type": "service",
- "Update": {
- "AutoPromote": false,
- "AutoRevert": false,
- "Canary": 0,
- "HealthCheck": "",
- "HealthyDeadline": 0,
- "MaxParallel": 1,
- "MinHealthyTime": 0,
- "ProgressDeadline": 0,
- "Stagger": 30000000000
+ "Count": 1,
+ "EphemeralDisk": {
+ "Migrate": false,
+ "SizeMB": 300,
+ "Sticky": false
},
- "VaultNamespace": "",
- "VaultToken": "",
- "Version": 0
- }
+ "Meta": null,
+ "Migrate": {
+ "HealthCheck": "checks",
+ "HealthyDeadline": 300000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000
+ },
+ "Name": "cache",
+ "Networks": null,
+ "ReschedulePolicy": {
+ "Attempts": 0,
+ "Delay": 30000000000,
+ "DelayFunction": "exponential",
+ "Interval": 0,
+ "MaxDelay": 3600000000000,
+ "Unlimited": true
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Scaling": null,
+ "Services": null,
+ "ShutdownDelay": null,
+ "Spreads": null,
+ "StopAfterClientDisconnect": null,
+ "Tasks": [
+ {
+ "Affinities": null,
+ "Artifacts": null,
+ "CSIPluginConfig": null,
+ "Config": {
+ "image": "redis:3.2",
+ "port_map": [
+ {
+ "db": 6379.0
+ }
+ ]
+ },
+ "Constraints": null,
+ "DispatchPayload": null,
+ "Driver": "docker",
+ "Env": null,
+ "KillSignal": "",
+ "KillTimeout": 5000000000,
+ "Kind": "",
+ "Leader": false,
+ "Lifecycle": null,
+ "LogConfig": {
+ "MaxFileSizeMB": 10,
+ "MaxFiles": 10
+ },
+ "Meta": null,
+ "Name": "redis",
+ "Resources": {
+ "CPU": 500,
+ "Devices": null,
+ "DiskMB": 0,
+ "IOPS": 0,
+ "MemoryMB": 256,
+ "Networks": [
+ {
+ "CIDR": "",
+ "DNS": null,
+ "Device": "",
+ "DynamicPorts": [
+ {
+ "HostNetwork": "default",
+ "Label": "db",
+ "To": 0,
+ "Value": 0
+ }
+ ],
+ "IP": "",
+ "MBits": 10,
+ "Mode": "",
+ "ReservedPorts": null
+ }
+ ]
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Services": [
+ {
+ "AddressMode": "auto",
+ "CanaryMeta": null,
+ "CanaryTags": null,
+ "Checks": [
+ {
+ "AddressMode": "",
+ "Args": null,
+ "CheckRestart": null,
+ "Command": "",
+ "Expose": false,
+ "FailuresBeforeCritical": 0,
+ "GRPCService": "",
+ "GRPCUseTLS": false,
+ "Header": null,
+ "InitialStatus": "",
+ "Interval": 10000000000,
+ "Method": "",
+ "Name": "alive",
+ "Path": "",
+ "PortLabel": "",
+ "Protocol": "",
+ "SuccessBeforePassing": 0,
+ "TLSSkipVerify": false,
+ "TaskName": "",
+ "Timeout": 2000000000,
+ "Type": "tcp"
+ }
+ ],
+ "Connect": null,
+ "EnableTagOverride": false,
+ "Meta": null,
+ "Name": "redis-cache",
+ "PortLabel": "db",
+ "Tags": [
+ "global",
+ "cache"
+ ],
+ "TaskName": ""
+ }
+ ],
+ "ShutdownDelay": 0,
+ "Templates": null,
+ "User": "",
+ "Vault": null,
+ "VolumeMounts": null
+ }
+ ],
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "checks",
+ "HealthyDeadline": 180000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000,
+ "ProgressDeadline": 600000000000,
+ "Stagger": 30000000000
+ },
+ "Volumes": null
+ }
+ ],
+ "Type": "service",
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "",
+ "HealthyDeadline": 0,
+ "MaxParallel": 1,
+ "MinHealthyTime": 0,
+ "ProgressDeadline": 0,
+ "Stagger": 30000000000
+ },
+ "VaultNamespace": "",
+ "VaultToken": "",
+ "Version": 0
+ }
]
-
"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
diff --git a/plugins/modules/nomad_token.py b/plugins/modules/nomad_token.py
index 07abd9d7c3..c189bf4b85 100644
--- a/plugins/modules/nomad_token.py
+++ b/plugins/modules/nomad_token.py
@@ -98,25 +98,26 @@ EXAMPLES = r"""
RETURN = r"""
result:
- description: Result returned by nomad.
- returned: always
- type: dict
- sample: {
- "accessor_id": "0d01c55f-8d63-f832-04ff-1866d4eb594e",
- "create_index": 14,
- "create_time": "2023-11-12T18:48:34.248857001Z",
- "expiration_time": null,
- "expiration_ttl": "",
- "global": true,
- "hash": "eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=",
- "modify_index": 836,
- "name": "devs",
- "policies": [
- "readonly"
- ],
- "roles": null,
- "secret_id": "12e878ab-e1f6-e103-b4c4-3b5173bb4cea",
- "type": "client"
+ description: Result returned by nomad.
+ returned: always
+ type: dict
+ sample:
+ {
+ "accessor_id": "0d01c55f-8d63-f832-04ff-1866d4eb594e",
+ "create_index": 14,
+ "create_time": "2023-11-12T18:48:34.248857001Z",
+ "expiration_time": null,
+ "expiration_ttl": "",
+ "global": true,
+ "hash": "eSn8H8RVqh8As8WQNnC2vlBRqXy6DECogc5umzX0P30=",
+ "modify_index": 836,
+ "name": "devs",
+ "policies": [
+ "readonly"
+ ],
+ "roles": null,
+ "secret_id": "12e878ab-e1f6-e103-b4c4-3b5173bb4cea",
+ "type": "client"
}
"""
diff --git a/plugins/modules/nosh.py b/plugins/modules/nosh.py
index da9db091bc..7cd4f4ad66 100644
--- a/plugins/modules/nosh.py
+++ b/plugins/modules/nosh.py
@@ -35,22 +35,22 @@ options:
required: false
choices: [started, stopped, reset, restarted, reloaded]
description:
- - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary.
- - V(restarted) will always bounce the service.
- - V(reloaded) will send a SIGHUP or start the service.
- - V(reset) will start or stop the service according to whether it is enabled or not.
+ - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+ - V(restarted) always bounces the service.
+ - V(reloaded) sends a SIGHUP or starts the service.
+ - V(reset) starts or stops the service according to whether it is enabled or not.
enabled:
required: false
type: bool
description:
- Enable or disable the service, independently of C(*.preset) file preference or running state. Mutually exclusive with
- O(preset). Will take effect prior to O(state=reset).
+ O(preset). It takes effect prior to O(state=reset).
preset:
required: false
type: bool
description:
- Enable or disable the service according to local preferences in C(*.preset) files. Mutually exclusive with O(enabled).
- Only has an effect if set to true. Will take effect prior to O(state=reset).
+ Only has an effect if set to true. It takes effect prior to O(state=reset).
user:
required: false
default: false
diff --git a/plugins/modules/nsupdate.py b/plugins/modules/nsupdate.py
index 9f665626b2..4049996ca3 100644
--- a/plugins/modules/nsupdate.py
+++ b/plugins/modules/nsupdate.py
@@ -63,8 +63,8 @@ options:
type: str
zone:
description:
- - DNS record will be modified on this O(zone).
- - When omitted DNS will be queried to attempt finding the correct zone.
+ - DNS record is modified on this O(zone).
+ - When omitted, DNS is queried to attempt finding the correct zone.
type: str
record:
description:
@@ -144,10 +144,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-changed:
- description: If module has modified record.
- returned: success
- type: str
record:
description: DNS record.
returned: success
@@ -476,18 +472,18 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
server=dict(required=True, type='str'),
- port=dict(required=False, default=53, type='int'),
- key_name=dict(required=False, type='str'),
- key_secret=dict(required=False, type='str', no_log=True),
- key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'),
- zone=dict(required=False, default=None, type='str'),
+ port=dict(default=53, type='int'),
+ key_name=dict(type='str'),
+ key_secret=dict(type='str', no_log=True),
+ key_algorithm=dict(default='hmac-md5', choices=tsig_algs, type='str'),
+ zone=dict(type='str'),
record=dict(required=True, type='str'),
- type=dict(required=False, default='A', type='str'),
- ttl=dict(required=False, default=3600, type='int'),
- value=dict(required=False, default=None, type='list', elements='str'),
- protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str')
+ type=dict(default='A', type='str'),
+ ttl=dict(default=3600, type='int'),
+ value=dict(type='list', elements='str'),
+ protocol=dict(default='tcp', choices=['tcp', 'udp'], type='str')
),
supports_check_mode=True
)
diff --git a/plugins/modules/ocapi_info.py b/plugins/modules/ocapi_info.py
index f4a216a47d..150b3ad7e2 100644
--- a/plugins/modules/ocapi_info.py
+++ b/plugins/modules/ocapi_info.py
@@ -120,24 +120,25 @@ details:
elements: str
status:
- description: Dictionary containing status information. See OCAPI documentation for details.
- returned: when supported
- type: dict
- sample: {
- "Details": [
- "None"
- ],
- "Health": [
- {
- "ID": 5,
- "Name": "OK"
- }
- ],
- "State": {
- "ID": 16,
- "Name": "In service"
- }
- }
+ description: Dictionary containing status information. See OCAPI documentation for details.
+ returned: when supported
+ type: dict
+ sample:
+ {
+ "Details": [
+ "None"
+ ],
+ "Health": [
+ {
+ "ID": 5,
+ "Name": "OK"
+ }
+ ],
+ "State": {
+ "ID": 16,
+ "Name": "In service"
+ }
+ }
"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/oci_vcn.py b/plugins/modules/oci_vcn.py
index 47d38137ea..56a637ac2c 100644
--- a/plugins/modules/oci_vcn.py
+++ b/plugins/modules/oci_vcn.py
@@ -78,22 +78,23 @@ EXAMPLES = r"""
RETURN = r"""
vcn:
- description: Information about the VCN
- returned: On successful create and update operation
- type: dict
- sample: {
- "cidr_block": "10.0.0.0/16",
- compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
- "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
- "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
- "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
- "display_name": "ansible_vcn",
- "dns_label": "ansiblevcn",
- "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
- "lifecycle_state": "AVAILABLE",
- "time_created": "2017-11-13T20:22:40.626000+00:00",
- "vcn_domain_name": "ansiblevcn.oraclevcn.com"
- }
+ description: Information about the VCN.
+ returned: On successful create and update operation
+ type: dict
+ sample:
+ {
+ "cidr_block": "10.0.0.0/16",
+ "compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
+ "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
+ "display_name": "ansible_vcn",
+ "dns_label": "ansiblevcn",
+ "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
+ "lifecycle_state": "AVAILABLE",
+ "time_created": "2017-11-13T20:22:40.626000+00:00",
+ "vcn_domain_name": "ansiblevcn.oraclevcn.com"
+ }
"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
@@ -161,17 +162,12 @@ def main():
)
module_args.update(
dict(
- cidr_block=dict(type="str", required=False),
- compartment_id=dict(type="str", required=False),
- display_name=dict(type="str", required=False, aliases=["name"]),
- dns_label=dict(type="str", required=False),
- state=dict(
- type="str",
- required=False,
- default="present",
- choices=["absent", "present"],
- ),
- vcn_id=dict(type="str", required=False, aliases=["id"]),
+ cidr_block=dict(type="str"),
+ compartment_id=dict(type="str"),
+ display_name=dict(type="str", aliases=["name"]),
+ dns_label=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ vcn_id=dict(type="str", aliases=["id"]),
)
)
diff --git a/plugins/modules/odbc.py b/plugins/modules/odbc.py
index 54c923cf1e..41b5df4f08 100644
--- a/plugins/modules/odbc.py
+++ b/plugins/modules/odbc.py
@@ -66,6 +66,7 @@ EXAMPLES = r"""
changed_when: false
"""
+# @FIXME RV 'results' is meant to be used when 'loop:' was used with the module.
RETURN = r"""
results:
description: List of lists of strings containing selected rows, likely empty for DDL statements.
diff --git a/plugins/modules/office_365_connector_card.py b/plugins/modules/office_365_connector_card.py
index 8ff82fecc1..6b8384a7ca 100644
--- a/plugins/modules/office_365_connector_card.py
+++ b/plugins/modules/office_365_connector_card.py
@@ -17,7 +17,7 @@ description:
U(https://learn.microsoft.com/en-us/microsoftteams/platform/task-modules-and-cards/cards/cards-reference#connector-card-for-microsoft-365-groups).
author: "Marc Sensenich (@marc-sensenich)"
notes:
- - This module is not idempotent, therefore if the same task is run twice there will be two Connector Cards created.
+ - This module is not idempotent, therefore if you run the same task twice then you create two Connector Cards.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -35,7 +35,7 @@ options:
type: str
description:
- A string used for summarizing card content.
- - This will be shown as the message subject.
+ - This is the message subject.
- This is required if the text parameter is not populated.
color:
type: str
@@ -49,13 +49,13 @@ options:
type: str
description:
- The main text of the card.
- - This will be rendered below the sender information and optional title,
+ - This is rendered below the sender information and optional title,
- And above any sections or actions present.
actions:
type: list
elements: dict
description:
- - This array of objects will power the action links found at the bottom of the card.
+ - This array of objects is used to power the action links found at the bottom of the card.
sections:
type: list
elements: dict
diff --git a/plugins/modules/one_host.py b/plugins/modules/one_host.py
index 8246172c90..7c43da3058 100644
--- a/plugins/modules/one_host.py
+++ b/plugins/modules/one_host.py
@@ -36,8 +36,8 @@ options:
state:
description:
- Takes the host to the desired lifecycle state.
- - If V(absent) the host will be deleted from the cluster.
- - If V(present) the host will be created in the cluster (includes V(enabled), V(disabled) and V(offline) states).
+ - If V(absent) the host is deleted from the cluster.
+ - If V(present) the host is created in the cluster (includes V(enabled), V(disabled) and V(offline) states).
- If V(enabled) the host is fully operational.
- V(disabled), for example to perform maintenance operations.
- V(offline), host is totally offline.
diff --git a/plugins/modules/one_image.py b/plugins/modules/one_image.py
index e5ffb68b4f..d9a21f86b7 100644
--- a/plugins/modules/one_image.py
+++ b/plugins/modules/one_image.py
@@ -48,8 +48,8 @@ options:
type: bool
new_name:
description:
- - A name that will be assigned to the existing or new image.
- - In the case of cloning, by default O(new_name) will take the name of the origin image with the prefix 'Copy of'.
+ - A name that is assigned to the existing or new image.
+ - In the case of cloning, by default O(new_name) is set to the name of the origin image with the prefix 'Copy of'.
type: str
persistent:
description:
@@ -325,7 +325,7 @@ datastore:
returned: when O(state=present), O(state=cloned), or O(state=renamed)
version_added: 9.5.0
vms:
- description: The image's list of vm ID's.
+ description: The image's list of VM IDs.
type: list
elements: int
returned: when O(state=present), O(state=cloned), or O(state=renamed)
diff --git a/plugins/modules/one_image_info.py b/plugins/modules/one_image_info.py
index 7e5def76fb..f940444cad 100644
--- a/plugins/modules/one_image_info.py
+++ b/plugins/modules/one_image_info.py
@@ -29,10 +29,10 @@ options:
elements: str
name:
description:
- - A O(name) of the image whose facts will be gathered.
- - If the O(name) begins with V(~) the O(name) will be used as regex pattern, which restricts the list of images (whose
- facts will be returned) whose names match specified regex.
- - Also, if the O(name) begins with V(~*) case-insensitive matching will be performed.
+ - A O(name) of the image whose facts are gathered.
+ - If the O(name) begins with V(~) the O(name) is used as regex pattern, which restricts the list of images (whose facts
+ are returned) whose names match specified regex.
+ - Also, if the O(name) begins with V(~*) case-insensitive matching is performed.
- See examples for more details.
type: str
author:
@@ -231,7 +231,7 @@ images:
sample: image_datastore
version_added: 9.5.0
vms:
- description: The image's list of vm ID's.
+ description: The image's list of VM IDs.
type: list
elements: int
version_added: 9.5.0
@@ -281,8 +281,8 @@ IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE',
class ImageInfoModule(OpenNebulaModule):
def __init__(self):
argument_spec = dict(
- ids=dict(type='list', aliases=['id'], elements='str', required=False),
- name=dict(type='str', required=False),
+ ids=dict(type='list', aliases=['id'], elements='str'),
+ name=dict(type='str'),
)
mutually_exclusive = [
['ids', 'name'],
diff --git a/plugins/modules/one_service.py b/plugins/modules/one_service.py
index 8244e0ca5a..88ccd29d74 100644
--- a/plugins/modules/one_service.py
+++ b/plugins/modules/one_service.py
@@ -55,7 +55,7 @@ options:
type: str
unique:
description:
- - Setting O(unique=true) will make sure that there is only one service instance running with a name set with O(service_name)
+ - Setting O(unique=true) ensures that there is only one service instance running with a name set with O(service_name)
when instantiating a service from a template specified with O(template_id) or O(template_name). Check examples below.
type: bool
default: false
@@ -73,11 +73,11 @@ options:
type: str
owner_id:
description:
- - ID of the user which will be set as the owner of the service.
+ - ID of the user which is set as the owner of the service.
type: int
group_id:
description:
- - ID of the group which will be set as the group of the service.
+ - ID of the group which is set as the group of the service.
type: int
wait:
description:
@@ -91,7 +91,7 @@ options:
type: int
custom_attrs:
description:
- - Dictionary of key/value custom attributes which will be used when instantiating a new service.
+ - Dictionary of key/value custom attributes which is used when instantiating a new service.
default: {}
type: dict
role:
diff --git a/plugins/modules/one_template.py b/plugins/modules/one_template.py
index 71db2c1d2c..091c4c55a7 100644
--- a/plugins/modules/one_template.py
+++ b/plugins/modules/one_template.py
@@ -33,12 +33,12 @@ attributes:
options:
id:
description:
- - A O(id) of the template you would like to manage. If not set then a new template will be created with the given O(name).
+ - A O(id) of the template you would like to manage. If not set then a new template is created with the given O(name).
type: int
name:
description:
- - A O(name) of the template you would like to manage. If a template with the given name does not exist it will be created,
- otherwise it will be managed by this module.
+ - A O(name) of the template you would like to manage. If a template with the given name does not exist it is created,
+ otherwise it is managed by this module.
type: str
template:
description:
@@ -163,11 +163,11 @@ from ansible_collections.community.general.plugins.module_utils.opennebula impor
class TemplateModule(OpenNebulaModule):
def __init__(self):
argument_spec = dict(
- id=dict(type='int', required=False),
- name=dict(type='str', required=False),
+ id=dict(type='int'),
+ name=dict(type='str'),
state=dict(type='str', choices=['present', 'absent'], default='present'),
- template=dict(type='str', required=False),
- filter=dict(type='str', required=False, choices=['user_primary_group', 'user', 'all', 'user_groups'], default='user'),
+ template=dict(type='str'),
+ filter=dict(type='str', choices=['user_primary_group', 'user', 'all', 'user_groups'], default='user'),
)
mutually_exclusive = [
diff --git a/plugins/modules/one_vm.py b/plugins/modules/one_vm.py
index 2139da5d4c..3d23efa036 100644
--- a/plugins/modules/one_vm.py
+++ b/plugins/modules/one_vm.py
@@ -39,7 +39,7 @@ options:
api_password:
description:
- Password of the user to login into OpenNebula RPC server. If not set then the value of the E(ONE_PASSWORD) environment
- variable is used. if both O(api_username) or O(api_password) are not set, then it will try authenticate with ONE auth
+ variable is used. If both O(api_username) and O(api_password) are not set, then it tries to authenticate with ONE auth
file. Default path is C(~/.one/one_auth).
- Set environment variable E(ONE_AUTH) to override this path.
type: str
@@ -53,7 +53,7 @@ options:
type: int
vm_start_on_hold:
description:
- - Set to true to put vm on hold while creating.
+ - Set to true to put VM on hold while creating.
default: false
type: bool
instance_ids:
@@ -80,7 +80,7 @@ options:
wait:
description:
- Wait for the instance to reach its desired state before returning. Keep in mind if you are waiting for instance to
- be in running state it does not mean that you will be able to SSH on that machine only that boot process have started
+ be in running state it does not mean that you are able to SSH on that machine, only that the boot process has started
on that instance. See the example using the M(ansible.builtin.wait_for) module for details.
default: true
type: bool
@@ -94,9 +94,9 @@ options:
- A dictionary of key/value attributes to add to new instances, or for setting C(state) of instances with these attributes.
- Keys are case insensitive and OpenNebula automatically converts them to upper case.
- Be aware V(NAME) is a special attribute which sets the name of the VM when it is deployed.
- - C(#) character(s) can be appended to the C(NAME) and the module will automatically add indexes to the names of VMs.
+ - C(#) character(s) can be appended to the C(NAME) and the module automatically adds indexes to the names of VMs.
- 'For example: V(NAME: foo-###) would create VMs with names V(foo-000), V(foo-001),...'
- - When used with O(count_attributes) and O(exact_count) the module will match the base name without the index part.
+ - When used with O(count_attributes) and O(exact_count) the module matches the base name without the index part.
default: {}
type: dict
labels:
@@ -126,7 +126,7 @@ options:
description:
- Indicates how many instances that match O(count_attributes) and O(count_labels) parameters should be deployed. Instances
are either created or terminated based on this value.
- - B(NOTE:) Instances with the least IDs will be terminated first.
+ - B(NOTE:) Instances with the lowest IDs are terminated first.
type: int
mode:
description:
@@ -135,11 +135,11 @@ options:
type: str
owner_id:
description:
- - ID of the user which will be set as the owner of the instance.
+ - ID of the user which is set as the owner of the instance.
type: int
group_id:
description:
- - ID of the group which will be set as the group of the instance.
+ - ID of the group which is set as the group of the instance.
type: int
memory:
description:
@@ -157,7 +157,7 @@ options:
type: float
vcpu:
description:
- - Number of CPUs (cores) new VM will have.
+ - Number of CPUs (cores) the new VM uses.
type: int
networks:
description:
@@ -170,9 +170,9 @@ options:
- Creates an image from a VM disk.
- It is a dictionary where you have to specify C(name) of the new image.
- Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
- - B(NOTE:) This operation will only be performed on the first VM (if more than one VM ID is passed) and the VM has to
- be in the C(poweredoff) state.
- - Also this operation will fail if an image with specified C(name) already exists.
+ - B(NOTE:) This operation is only performed on the first VM (if more than one VM ID is passed) and the VM has to be
+ in the C(poweredoff) state.
+ - Also this operation fails if an image with specified C(name) already exists.
type: dict
persistent:
description:
@@ -195,14 +195,16 @@ options:
- When O(instance_ids) is provided, updates running VMs with the C(updateconf) API call.
- When new VMs are being created, emulates the C(updateconf) API call using direct template merge.
- Allows for complete modifications of the C(CONTEXT) attribute.
- - "Supported attributes include:"
+ - 'Supported attributes include:'
- B(BACKUP_CONFIG:) V(BACKUP_VOLATILE), V(FS_FREEZE), V(INCREMENT_MODE), V(KEEP_LAST), V(MODE);
- - B(CONTEXT:) (Any value, except V(ETH*). Variable substitution will be made);
+ - B(CONTEXT:) (Any value, except V(ETH*). Variable substitutions are made);
- B(CPU_MODEL:) V(FEATURES), V(MODEL);
- - B(FEATURES:) V(ACPI), V(APIC), V(GUEST_AGENT), V(HYPERV), V(IOTHREADS), V(LOCALTIME), V(PAE), V(VIRTIO_BLK_QUEUES), V(VIRTIO_SCSI_QUEUES);
+ - B(FEATURES:) V(ACPI), V(APIC), V(GUEST_AGENT), V(HYPERV), V(IOTHREADS), V(LOCALTIME), V(PAE), V(VIRTIO_BLK_QUEUES),
+ V(VIRTIO_SCSI_QUEUES);
- B(GRAPHICS:) V(COMMAND), V(KEYMAP), V(LISTEN), V(PASSWD), V(PORT), V(TYPE);
- B(INPUT:) V(BUS), V(TYPE);
- - B(OS:) V(ARCH), V(BOOT), V(BOOTLOADER), V(FIRMWARE), V(INITRD), V(KERNEL), V(KERNEL_CMD), V(MACHINE), V(ROOT), V(SD_DISK_BUS), V(UUID);
+ - B(OS:) V(ARCH), V(BOOT), V(BOOTLOADER), V(FIRMWARE), V(INITRD), V(KERNEL), V(KERNEL_CMD), V(MACHINE), V(ROOT), V(SD_DISK_BUS),
+ V(UUID);
- B(RAW:) V(DATA), V(DATA_VMX), V(TYPE), V(VALIDATE);
- B(VIDEO:) V(ATS), V(IOMMU), V(RESOLUTION), V(TYPE), V(VRAM).
type: dict
@@ -453,35 +455,35 @@ instances:
returned: success
contains:
vm_id:
- description: Vm ID.
+ description: VM ID.
type: int
sample: 153
vm_name:
- description: Vm name.
+ description: VM name.
type: str
sample: foo
template_id:
- description: Vm's template ID.
+ description: VM's template ID.
type: int
sample: 153
group_id:
- description: Vm's group ID.
+ description: VM's group ID.
type: int
sample: 1
group_name:
- description: Vm's group name.
+ description: VM's group name.
type: str
sample: one-users
owner_id:
- description: Vm's owner ID.
+ description: VM's owner ID.
type: int
sample: 143
owner_name:
- description: Vm's owner name.
+ description: VM's owner name.
type: str
sample: app-user
mode:
- description: Vm's mode.
+ description: VM's mode.
type: str
returned: success
sample: 660
@@ -512,20 +514,21 @@ instances:
networks:
description: A list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC.
type: list
- sample: [
- {
- "ip": "10.120.5.33",
- "mac": "02:00:0a:78:05:21",
- "name": "default-test-private",
- "security_groups": "0,10"
- },
- {
- "ip": "10.120.5.34",
- "mac": "02:00:0a:78:05:22",
- "name": "default-test-private",
- "security_groups": "0"
- }
- ]
+ sample:
+ [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
uptime_h:
description: Uptime of the instance in hours.
type: int
@@ -537,23 +540,27 @@ instances:
attributes:
description: A dictionary of key/values attributes that are associated with the instance.
type: dict
- sample: {
- "HYPERVISOR": "kvm",
- "LOGO": "images/logos/centos.png",
- "TE_GALAXY": "bar",
- "USER_INPUTS": null
- }
+ sample:
+ {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
updateconf:
description: A dictionary of key/values attributes that are set with the updateconf API call.
type: dict
version_added: 6.3.0
- sample: {
- "OS": { "ARCH": "x86_64" },
- "CONTEXT": {
- "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
- "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
+ sample:
+ {
+ "OS": {
+ "ARCH": "x86_64"
+ },
+ "CONTEXT": {
+ "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
+ "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
+ }
}
- }
tagged_instances:
description:
- A list of instances info based on a specific attributes and/or labels that are specified with O(count_attributes) and
@@ -562,35 +569,35 @@ tagged_instances:
returned: success
contains:
vm_id:
- description: Vm ID.
+ description: VM ID.
type: int
sample: 153
vm_name:
- description: Vm name.
+ description: VM name.
type: str
sample: foo
template_id:
- description: Vm's template ID.
+ description: VM's template ID.
type: int
sample: 153
group_id:
- description: Vm's group ID.
+ description: VM's group ID.
type: int
sample: 1
group_name:
- description: Vm's group name.
+ description: VM's group name.
type: str
sample: one-users
owner_id:
- description: Vm's user ID.
+ description: VM's user ID.
type: int
sample: 143
owner_name:
- description: Vm's user name.
+ description: VM's user name.
type: str
sample: app-user
mode:
- description: Vm's mode.
+ description: VM's mode.
type: str
returned: success
sample: 660
@@ -621,20 +628,21 @@ tagged_instances:
networks:
description: A list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC.
type: list
- sample: [
- {
- "ip": "10.120.5.33",
- "mac": "02:00:0a:78:05:21",
- "name": "default-test-private",
- "security_groups": "0,10"
- },
- {
- "ip": "10.120.5.34",
- "mac": "02:00:0a:78:05:22",
- "name": "default-test-private",
- "security_groups": "0"
- }
- ]
+ sample:
+ [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
uptime_h:
description: Uptime of the instance in hours.
type: int
@@ -646,12 +654,27 @@ tagged_instances:
attributes:
description: A dictionary of key/values attributes that are associated with the instance.
type: dict
- sample: {"HYPERVISOR": "kvm", "LOGO": "images/logos/centos.png", "TE_GALAXY": "bar", "USER_INPUTS": null}
+ sample:
+ {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
updateconf:
description: A dictionary of key/values attributes that are set with the updateconf API call.
type: dict
version_added: 6.3.0
- sample: {"OS": {"ARCH": "x86_64"}, "CONTEXT": {"START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0", "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."}}
+ sample:
+ {
+ "OS": {
+ "ARCH": "x86_64"
+ },
+ "CONTEXT": {
+ "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
+ "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
+ }
+ }
"""
try:
diff --git a/plugins/modules/one_vnet.py b/plugins/modules/one_vnet.py
index b8cb0c6559..b77530e756 100644
--- a/plugins/modules/one_vnet.py
+++ b/plugins/modules/one_vnet.py
@@ -30,12 +30,12 @@ options:
id:
description:
- A O(id) of the network you would like to manage.
- - If not set then a new network will be created with the given O(name).
+ - If not set then a new network is created with the given O(name).
type: int
name:
description:
- - A O(name) of the network you would like to manage. If a network with the given name does not exist it will be created,
- otherwise it will be managed by this module.
+ - A O(name) of the network you would like to manage. If a network with the given name does not exist, then it is created,
+ otherwise it is managed by this module.
type: str
template:
description:
@@ -263,10 +263,10 @@ class NetworksModule(OpenNebulaModule):
def __init__(self):
argument_spec = dict(
- id=dict(type='int', required=False),
- name=dict(type='str', required=False),
+ id=dict(type='int'),
+ name=dict(type='str'),
state=dict(type='str', choices=['present', 'absent'], default='present'),
- template=dict(type='str', required=False),
+ template=dict(type='str'),
)
mutually_exclusive = [
diff --git a/plugins/modules/oneandone_firewall_policy.py b/plugins/modules/oneandone_firewall_policy.py
index 743694cf90..eca9a8ed70 100644
--- a/plugins/modules/oneandone_firewall_policy.py
+++ b/plugins/modules/oneandone_firewall_policy.py
@@ -46,14 +46,14 @@ options:
type: str
rules:
description:
- - A list of rules that will be set for the firewall policy. Each rule must contain protocol parameter, in addition to
- three optional parameters (port_from, port_to, and source).
+ - List of rules that are set for the firewall policy. Each rule must contain protocol parameter, in addition to three
+ optional parameters (port_from, port_to, and source).
type: list
elements: dict
default: []
add_server_ips:
description:
- - A list of server identifiers (id or name) to be assigned to a firewall policy. Used in combination with update state.
+ - A list of server identifiers (ID or name) to be assigned to a firewall policy. Used in combination with update state.
type: list
elements: str
required: false
@@ -67,15 +67,15 @@ options:
default: []
add_rules:
description:
- - A list of rules that will be added to an existing firewall policy. It is syntax is the same as the one used for rules
- parameter. Used in combination with update state.
+ - List of rules that are added to an existing firewall policy. Its syntax is the same as the one used for rules parameter.
+ Used in combination with update state.
type: list
elements: dict
required: false
default: []
remove_rules:
description:
- - A list of rule IDs that will be removed from an existing firewall policy. Used in combination with update state.
+ - List of rule IDs that are removed from an existing firewall policy. Used in combination with update state.
type: list
elements: str
required: false
@@ -144,7 +144,7 @@ EXAMPLES = r"""
firewall_policy: ansible-firewall-policy-updated
add_server_ips:
- server_identifier (id or name)
- - server_identifier #2 (id or name)
+ - "server_identifier #2 (id or name)"
wait: true
wait_timeout: 500
state: update
@@ -182,8 +182,8 @@ EXAMPLES = r"""
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
remove_rules:
- - rule_id #1
- - rule_id #2
+ - "rule_id #1"
+ - "rule_id #2"
- '...'
wait: true
wait_timeout: 500
@@ -194,7 +194,7 @@ RETURN = r"""
firewall_policy:
description: Information about the firewall policy that was processed.
type: dict
- sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}
returned: always
"""
@@ -288,7 +288,7 @@ def _add_firewall_rules(module, oneandone_conn, firewall_id, rules):
if module.check_mode:
firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id)
- if (firewall_rules and firewall_policy_id):
+ if firewall_rules and firewall_policy_id:
return True
return False
diff --git a/plugins/modules/oneandone_load_balancer.py b/plugins/modules/oneandone_load_balancer.py
index cb915e4efa..5a8ce7b8f0 100644
--- a/plugins/modules/oneandone_load_balancer.py
+++ b/plugins/modules/oneandone_load_balancer.py
@@ -78,15 +78,15 @@ options:
choices: ["ROUND_ROBIN", "LEAST_CONNECTIONS"]
datacenter:
description:
- - ID or country code of the datacenter where the load balancer will be created.
+ - ID or country code of the datacenter where the load balancer is created.
- If not specified, it defaults to V(US).
type: str
choices: ["US", "ES", "DE", "GB"]
required: false
rules:
description:
- - A list of rule objects that will be set for the load balancer. Each rule must contain protocol, port_balancer, and
- port_server parameters, in addition to source parameter, which is optional.
+ - A list of rule objects that are set for the load balancer. Each rule must contain protocol, port_balancer, and port_server
+ parameters, in addition to source parameter, which is optional.
type: list
elements: dict
default: []
@@ -111,15 +111,15 @@ options:
default: []
add_rules:
description:
- - A list of rules that will be added to an existing load balancer. It is syntax is the same as the one used for rules
- parameter. Used in combination with O(state=update).
+ - A list of rules that are added to an existing load balancer. Its syntax is the same as the one used for rules parameter.
+ Used in combination with O(state=update).
type: list
elements: dict
required: false
default: []
remove_rules:
description:
- - A list of rule IDs that will be removed from an existing load balancer. Used in combination with O(state=update).
+ - A list of rule IDs that are removed from an existing load balancer. Used in combination with O(state=update).
type: list
elements: str
required: false
@@ -233,8 +233,8 @@ EXAMPLES = r"""
load_balancer: ansible load balancer updated
description: Adding rules to a load balancer with ansible
remove_rules:
- - rule_id #1
- - rule_id #2
+ - "rule_id #1"
+ - "rule_id #2"
- '...'
wait: true
wait_timeout: 500
@@ -245,7 +245,7 @@ RETURN = r"""
load_balancer:
description: Information about the load balancer that was processed.
type: dict
- sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}'
+ sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}
returned: always
"""
@@ -344,7 +344,7 @@ def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules):
if module.check_mode:
lb_id = get_load_balancer(oneandone_conn, load_balancer_id)
- if (load_balancer_rules and lb_id):
+ if load_balancer_rules and lb_id:
return True
return False
diff --git a/plugins/modules/oneandone_monitoring_policy.py b/plugins/modules/oneandone_monitoring_policy.py
index a0aa17611e..2d8693156c 100644
--- a/plugins/modules/oneandone_monitoring_policy.py
+++ b/plugins/modules/oneandone_monitoring_policy.py
@@ -89,7 +89,7 @@ options:
required: true
ports:
description:
- - Array of ports that will be monitoring.
+ - Array of ports that are to be monitored.
type: list
elements: dict
default: []
@@ -114,7 +114,7 @@ options:
required: true
processes:
description:
- - Array of processes that will be monitoring.
+ - Array of processes that are to be monitored.
type: list
elements: dict
default: []
@@ -413,7 +413,7 @@ RETURN = r"""
monitoring_policy:
description: Information about the monitoring policy that was processed.
type: dict
- sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ sample: {"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}
returned: always
"""
@@ -537,7 +537,7 @@ def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
if module.check_mode:
mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id)
- if (monitoring_policy_processes and mp_id):
+ if monitoring_policy_processes and mp_id:
return True
return False
diff --git a/plugins/modules/oneandone_private_network.py b/plugins/modules/oneandone_private_network.py
index 1a56fe345c..f39c464f96 100644
--- a/plugins/modules/oneandone_private_network.py
+++ b/plugins/modules/oneandone_private_network.py
@@ -50,7 +50,7 @@ options:
type: str
datacenter:
description:
- - The identifier of the datacenter where the private network will be created.
+ - The identifier of the datacenter where the private network is created.
type: str
choices: [US, ES, DE, GB]
network_address:
@@ -143,7 +143,7 @@ RETURN = r"""
private_network:
description: Information about the private network.
type: dict
- sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}'
+ sample: {"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}
returned: always
"""
diff --git a/plugins/modules/oneandone_public_ip.py b/plugins/modules/oneandone_public_ip.py
index c30c0bbdc7..b6b49c5b4a 100644
--- a/plugins/modules/oneandone_public_ip.py
+++ b/plugins/modules/oneandone_public_ip.py
@@ -43,7 +43,7 @@ options:
required: false
datacenter:
description:
- - ID of the datacenter where the IP will be created (only for unassigned IPs).
+ - ID of the datacenter where the IP is created (only for unassigned IPs).
type: str
choices: [US, ES, DE, GB]
default: US
@@ -110,7 +110,7 @@ RETURN = r"""
public_ip:
description: Information about the public IP that was processed.
type: dict
- sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}'
+ sample: {"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}
returned: always
"""
diff --git a/plugins/modules/oneandone_server.py b/plugins/modules/oneandone_server.py
index ae9198c7d0..7683ea1480 100644
--- a/plugins/modules/oneandone_server.py
+++ b/plugins/modules/oneandone_server.py
@@ -73,8 +73,8 @@ options:
type: float
hdds:
description:
- - A list of hard disks with nested O(ignore:hdds[].size) and O(ignore:hdds[].is_main) properties. It must be provided with O(vcore),
- O(cores_per_processor), and O(ram) parameters.
+ - A list of hard disks with nested O(ignore:hdds[].size) and O(ignore:hdds[].is_main) properties. It must be provided
+ with O(vcore), O(cores_per_processor), and O(ram) parameters.
type: list
elements: dict
private_network:
diff --git a/plugins/modules/onepassword_info.py b/plugins/modules/onepassword_info.py
index 00fa847c07..5689d28fe6 100644
--- a/plugins/modules/onepassword_info.py
+++ b/plugins/modules/onepassword_info.py
@@ -51,7 +51,7 @@ options:
section:
type: str
description:
- - The name of a section within this item containing the specified field (optional, will search all sections if not
+ - The name of a section within this item containing the specified field (optional, it searches all sections if not
specified).
vault:
type: str
@@ -62,8 +62,7 @@ options:
auto_login:
type: dict
description:
- - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info) will attempt
- to sign in to 1Password automatically.
+ - A dictionary containing authentication details. If this is set, the module attempts to sign in to 1Password automatically.
- Without this option, you must have already logged in using the 1Password CLI before running Ansible.
- It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
the Ansible Vault is equal to or greater in strength than the 1Password master password.
@@ -72,7 +71,7 @@ options:
type: str
description:
- 1Password subdomain name (V(subdomain).1password.com).
- - If this is not specified, the most recent subdomain will be used.
+ - If this is not specified, the most recent subdomain is used.
username:
type: str
description:
@@ -209,7 +208,7 @@ class OnePasswordInfo(object):
def _parse_field(self, data_json, item_id, field_name, section_title=None):
data = json.loads(data_json)
- if ('documentAttributes' in data['details']):
+ if 'documentAttributes' in data['details']:
# This is actually a document, let's fetch the document data instead!
document = self._run(["get", "document", data['overview']['title']])
return {'document': document[1].strip()}
@@ -219,7 +218,7 @@ class OnePasswordInfo(object):
# Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
# not inside it, so we need to check there first.
- if (field_name in data['details']):
+ if field_name in data['details']:
return {field_name: data['details'][field_name]}
# Otherwise we continue looking inside the 'fields' attribute for the specified field.
@@ -375,7 +374,7 @@ def main():
username=dict(type='str'),
master_password=dict(required=True, type='str', no_log=True),
secret_key=dict(type='str', no_log=True),
- ), default=None),
+ )),
search_terms=dict(required=True, type='list', elements='dict'),
),
supports_check_mode=True
diff --git a/plugins/modules/oneview_ethernet_network.py b/plugins/modules/oneview_ethernet_network.py
index 823fea3b2c..7ba3abb6e4 100644
--- a/plugins/modules/oneview_ethernet_network.py
+++ b/plugins/modules/oneview_ethernet_network.py
@@ -27,9 +27,9 @@ options:
state:
description:
- Indicates the desired state for the Ethernet Network resource.
- - V(present) will ensure data properties are compliant with OneView.
- - V(absent) will remove the resource from OneView, if it exists.
- - V(default_bandwidth_reset) will reset the network connection template to the default.
+ - V(present) ensures data properties are compliant with OneView.
+ - V(absent) removes the resource from OneView, if it exists.
+ - V(default_bandwidth_reset) resets the network connection template to the default.
type: str
default: present
choices: [present, absent, default_bandwidth_reset]
diff --git a/plugins/modules/oneview_fc_network.py b/plugins/modules/oneview_fc_network.py
index 312a5dc893..3063e80757 100644
--- a/plugins/modules/oneview_fc_network.py
+++ b/plugins/modules/oneview_fc_network.py
@@ -24,8 +24,8 @@ options:
state:
description:
- Indicates the desired state for the Fibre Channel Network resource.
- - V(present) will ensure data properties are compliant with OneView.
- - V(absent) will remove the resource from OneView, if it exists.
+ - V(present) ensures data properties are compliant with OneView.
+ - V(absent) removes the resource from OneView, if it exists.
type: str
choices: ['present', 'absent']
required: true
diff --git a/plugins/modules/oneview_fc_network_info.py b/plugins/modules/oneview_fc_network_info.py
index af20869dc3..9de22ef55c 100644
--- a/plugins/modules/oneview_fc_network_info.py
+++ b/plugins/modules/oneview_fc_network_info.py
@@ -87,8 +87,8 @@ class FcNetworkInfoModule(OneViewModuleBase):
def __init__(self):
argument_spec = dict(
- name=dict(required=False, type='str'),
- params=dict(required=False, type='dict')
+ name=dict(type='str'),
+ params=dict(type='dict')
)
super(FcNetworkInfoModule, self).__init__(
diff --git a/plugins/modules/oneview_fcoe_network.py b/plugins/modules/oneview_fcoe_network.py
index 15128bd372..37fbff9ef4 100644
--- a/plugins/modules/oneview_fcoe_network.py
+++ b/plugins/modules/oneview_fcoe_network.py
@@ -25,8 +25,8 @@ options:
state:
description:
- Indicates the desired state for the FCoE Network resource.
- - V(present) will ensure data properties are compliant with OneView.
- - V(absent) will remove the resource from OneView, if it exists.
+ - V(present) ensures data properties are compliant with OneView.
+ - V(absent) removes the resource from OneView, if it exists.
type: str
default: present
choices: ['present', 'absent']
diff --git a/plugins/modules/oneview_logical_interconnect_group.py b/plugins/modules/oneview_logical_interconnect_group.py
index a45224cb31..2683fc5468 100644
--- a/plugins/modules/oneview_logical_interconnect_group.py
+++ b/plugins/modules/oneview_logical_interconnect_group.py
@@ -28,8 +28,8 @@ options:
state:
description:
- Indicates the desired state for the Logical Interconnect Group resource.
- - V(absent) will remove the resource from OneView, if it exists.
- - V(present) will ensure data properties are compliant with OneView.
+ - V(absent) removes the resource from OneView, if it exists.
+ - V(present) ensures data properties are compliant with OneView.
type: str
choices: [absent, present]
default: present
diff --git a/plugins/modules/oneview_network_set.py b/plugins/modules/oneview_network_set.py
index a7fae51f21..ee5d3560a7 100644
--- a/plugins/modules/oneview_network_set.py
+++ b/plugins/modules/oneview_network_set.py
@@ -27,8 +27,8 @@ options:
state:
description:
- Indicates the desired state for the Network Set resource.
- - V(present) will ensure data properties are compliant with OneView.
- - V(absent) will remove the resource from OneView, if it exists.
+ - V(present) ensures data properties are compliant with OneView.
+ - V(absent) removes the resource from OneView, if it exists.
type: str
default: present
choices: ['present', 'absent']
diff --git a/plugins/modules/online_server_info.py b/plugins/modules/online_server_info.py
index e36c78ef0e..a06dae1926 100644
--- a/plugins/modules/online_server_info.py
+++ b/plugins/modules/online_server_info.py
@@ -41,92 +41,92 @@ online_server_info:
type: list
elements: dict
sample:
- "online_server_info": [
- {
- "abuse": "abuse@example.com",
- "anti_ddos": false,
- "bmc": {
- "session_key": null
- },
- "boot_mode": "normal",
- "contacts": {
- "owner": "foobar",
- "tech": "foobar"
- },
+ [
+ {
+ "abuse": "abuse@example.com",
+ "anti_ddos": false,
+ "bmc": {
+ "session_key": null
+ },
+ "boot_mode": "normal",
+ "contacts": {
+ "owner": "foobar",
+ "tech": "foobar"
+ },
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "drive_arrays": [
+ {
"disks": [
- {
- "$ref": "/api/v1/server/hardware/disk/68452"
- },
- {
- "$ref": "/api/v1/server/hardware/disk/68453"
- }
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
],
- "drive_arrays": [
- {
- "disks": [
- {
- "$ref": "/api/v1/server/hardware/disk/68452"
- },
- {
- "$ref": "/api/v1/server/hardware/disk/68453"
- }
- ],
- "raid_controller": {
- "$ref": "/api/v1/server/hardware/raidController/9910"
- },
- "raid_level": "RAID1"
- }
- ],
- "hardware_watch": true,
- "hostname": "sd-42",
- "id": 42,
- "ip": [
- {
- "address": "195.154.172.149",
- "mac": "28:92:4a:33:5e:c6",
- "reverse": "195-154-172-149.rev.poneytelecom.eu.",
- "switch_port_state": "up",
- "type": "public"
- },
- {
- "address": "10.90.53.212",
- "mac": "28:92:4a:33:5e:c7",
- "reverse": null,
- "switch_port_state": "up",
- "type": "private"
- }
- ],
- "last_reboot": "2018-08-23T08:32:03.000Z",
- "location": {
- "block": "A",
- "datacenter": "DC3",
- "position": 19,
- "rack": "A23",
- "room": "4 4-4"
+ "raid_controller": {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
},
- "network": {
- "ip": [
- "195.154.172.149"
- ],
- "ipfo": [],
- "private": [
- "10.90.53.212"
- ]
- },
- "offer": "Pro-1-S-SATA",
- "os": {
- "name": "FreeBSD",
- "version": "11.1-RELEASE"
- },
- "power": "ON",
- "proactive_monitoring": false,
- "raid_controllers": [
- {
- "$ref": "/api/v1/server/hardware/raidController/9910"
- }
- ],
- "support": "Basic service level"
- }
+ "raid_level": "RAID1"
+ }
+ ],
+ "hardware_watch": true,
+ "hostname": "sd-42",
+ "id": 42,
+ "ip": [
+ {
+ "address": "195.154.172.149",
+ "mac": "28:92:4a:33:5e:c6",
+ "reverse": "195-154-172-149.rev.poneytelecom.eu.",
+ "switch_port_state": "up",
+ "type": "public"
+ },
+ {
+ "address": "10.90.53.212",
+ "mac": "28:92:4a:33:5e:c7",
+ "reverse": null,
+ "switch_port_state": "up",
+ "type": "private"
+ }
+ ],
+ "last_reboot": "2018-08-23T08:32:03.000Z",
+ "location": {
+ "block": "A",
+ "datacenter": "DC3",
+ "position": 19,
+ "rack": "A23",
+ "room": "4 4-4"
+ },
+ "network": {
+ "ip": [
+ "195.154.172.149"
+ ],
+ "ipfo": [],
+ "private": [
+ "10.90.53.212"
+ ]
+ },
+ "offer": "Pro-1-S-SATA",
+ "os": {
+ "name": "FreeBSD",
+ "version": "11.1-RELEASE"
+ },
+ "power": "ON",
+ "proactive_monitoring": false,
+ "raid_controllers": [
+ {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ }
+ ],
+ "support": "Basic service level"
+ }
]
"""
diff --git a/plugins/modules/online_user_info.py b/plugins/modules/online_user_info.py
index 60e0763267..5b1628adad 100644
--- a/plugins/modules/online_user_info.py
+++ b/plugins/modules/online_user_info.py
@@ -37,13 +37,13 @@ online_user_info:
returned: success
type: dict
sample:
- "online_user_info": {
- "company": "foobar LLC",
- "email": "foobar@example.com",
- "first_name": "foo",
- "id": 42,
- "last_name": "bar",
- "login": "foobar"
+ {
+ "company": "foobar LLC",
+ "email": "foobar@example.com",
+ "first_name": "foo",
+ "id": 42,
+ "last_name": "bar",
+ "login": "foobar"
}
"""
diff --git a/plugins/modules/open_iscsi.py b/plugins/modules/open_iscsi.py
index defb0a072b..80360833a2 100644
--- a/plugins/modules/open_iscsi.py
+++ b/plugins/modules/open_iscsi.py
@@ -44,7 +44,7 @@ options:
login:
description:
- Whether the target node should be connected.
- - When O(target) is omitted, will login to all available.
+ - When O(target) is omitted, it logins to all available.
type: bool
aliases: [state]
node_auth:
@@ -84,7 +84,7 @@ options:
description:
- Whether the list of target nodes on the portal should be (re)discovered and added to the persistent iSCSI database.
- Keep in mind that C(iscsiadm) discovery resets configuration, like C(node.startup) to manual, hence combined with
- O(auto_node_startup=true) will always return a changed state.
+ O(auto_node_startup=true) always returns a changed state.
type: bool
default: false
show_nodes:
@@ -95,7 +95,7 @@ options:
rescan:
description:
- Rescan an established session for discovering new targets.
- - When O(target) is omitted, will rescan all sessions.
+ - When O(target) is omitted, it rescans all sessions.
type: bool
default: false
version_added: 4.1.0
diff --git a/plugins/modules/openbsd_pkg.py b/plugins/modules/openbsd_pkg.py
index b9a541cc44..e81fce3018 100644
--- a/plugins/modules/openbsd_pkg.py
+++ b/plugins/modules/openbsd_pkg.py
@@ -36,9 +36,9 @@ options:
elements: str
state:
description:
- - V(present) will make sure the package is installed.
- - V(latest) will make sure the latest version of the package is installed.
- - V(absent) will make sure the specified package is not installed.
+ - V(present) ensures the package is installed.
+ - V(latest) ensures the latest version of the package is installed.
+ - V(absent) ensures the specified package is not installed.
choices: [absent, latest, present, installed, removed]
default: present
type: str
@@ -73,7 +73,7 @@ options:
type: bool
default: false
notes:
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly
+ - When used with a C(loop:) each package is processed individually; it is much more efficient to pass the list directly
to the O(name) option.
"""
diff --git a/plugins/modules/opendj_backendprop.py b/plugins/modules/opendj_backendprop.py
index cd55a39d51..be4edac125 100644
--- a/plugins/modules/opendj_backendprop.py
+++ b/plugins/modules/opendj_backendprop.py
@@ -10,10 +10,10 @@ __metaclass__ = type
DOCUMENTATION = r"""
module: opendj_backendprop
-short_description: Will update the backend configuration of OpenDJ using the dsconfig set-backend-prop command
+short_description: Update the backend configuration of OpenDJ using the dsconfig set-backend-prop command
description:
- - This module will update settings for OpenDJ with the command set-backend-prop.
- - It will check first using de get-backend-prop if configuration needs to be applied.
+ - This module updates settings for OpenDJ with the command C(set-backend-prop).
+ - It checks first using C(get-backend-prop) if configuration needs to be applied.
author:
- Werner Dijkerman (@dj-wasabi)
extends_documentation_fragment:
@@ -153,9 +153,9 @@ def main():
opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
hostname=dict(required=True),
port=dict(required=True),
- username=dict(default="cn=Directory Manager", required=False),
- password=dict(required=False, no_log=True),
- passwordfile=dict(required=False, type="path"),
+ username=dict(default="cn=Directory Manager"),
+ password=dict(no_log=True),
+ passwordfile=dict(type="path"),
backend=dict(required=True),
name=dict(required=True),
value=dict(required=True),
diff --git a/plugins/modules/openwrt_init.py b/plugins/modules/openwrt_init.py
index bf5ce2b76a..c8c98f2d39 100644
--- a/plugins/modules/openwrt_init.py
+++ b/plugins/modules/openwrt_init.py
@@ -32,20 +32,19 @@ options:
state:
type: str
description:
- - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary.
- - V(restarted) will always bounce the service.
- - V(reloaded) will always reload.
+ - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+ - V(restarted) always bounces the service.
+ - V(reloaded) always reloads.
choices: ['started', 'stopped', 'restarted', 'reloaded']
enabled:
description:
- - Whether the service should start on boot. B(At least one of state and enabled are required).
+ - Whether the service should start on boot. B(At least one) of O(state) and O(enabled) is required.
type: bool
pattern:
type: str
description:
- - If the service does not respond to the 'running' command, name a substring to look for as would be found in the output
- of the C(ps) command as a stand-in for a 'running' result. If the string is found, the service will be assumed to
- be running.
+ - If the service does not respond to the C(running) command, name a substring to look for as would be found in the output
+ of the C(ps) command as a stand-in for a C(running) result. If the string is found, the service is assumed to be running.
notes:
- One option other than O(name) is required.
requirements:
diff --git a/plugins/modules/opkg.py b/plugins/modules/opkg.py
index 4c2fe74949..b57fbd7df7 100644
--- a/plugins/modules/opkg.py
+++ b/plugins/modules/opkg.py
@@ -134,7 +134,6 @@ class Opkg(StateModuleHelper):
executable=dict(type="path"),
),
)
- use_old_vardict = False
def __init_module__(self):
self.vars.set("install_c", 0, output=False, change=True)
diff --git a/plugins/modules/osx_defaults.py b/plugins/modules/osx_defaults.py
index 75bd03b4ad..56ff6e1ac1 100644
--- a/plugins/modules/osx_defaults.py
+++ b/plugins/modules/osx_defaults.py
@@ -69,7 +69,7 @@ options:
state:
description:
- The state of the user defaults.
- - If set to V(list) will query the given parameter specified by O(key). Returns V(null) is nothing found or mis-spelled.
+ - If set to V(list) it queries the given parameter specified by O(key). Returns V(null) if nothing is found or the key is misspelled.
type: str
choices: [absent, list, present]
default: present
@@ -191,7 +191,7 @@ class OSXDefaults(object):
@staticmethod
def is_int(value):
as_str = str(value)
- if (as_str.startswith("-")):
+ if as_str.startswith("-"):
return as_str[1:].isdigit()
else:
return as_str.isdigit()
diff --git a/plugins/modules/ovh_ip_failover.py b/plugins/modules/ovh_ip_failover.py
index 0734e985f7..425ee614f5 100644
--- a/plugins/modules/ovh_ip_failover.py
+++ b/plugins/modules/ovh_ip_failover.py
@@ -49,13 +49,13 @@ options:
default: true
type: bool
description:
- - If true, the module will wait for the IP address to be moved. If false, exit without waiting. The taskId will be returned
+ - If V(true), the module waits for the IP address to be moved. If V(false), it exits without waiting. The C(taskId) is returned
in module output.
wait_task_completion:
required: false
default: 0
description:
- - If not 0, the module will wait for this task ID to be completed. Use O(wait_task_completion) if you want to wait for
+ - If not V(0), the module waits for this task ID to be completed. Use O(wait_task_completion) if you want to wait for
completion of a previously executed task with O(wait_completion=false). You can execute this module repeatedly on
a list of failover IPs using O(wait_completion=false) (see examples).
type: int
diff --git a/plugins/modules/ovh_ip_loadbalancing_backend.py b/plugins/modules/ovh_ip_loadbalancing_backend.py
index cefb9231bd..8bf294a1d5 100644
--- a/plugins/modules/ovh_ip_loadbalancing_backend.py
+++ b/plugins/modules/ovh_ip_loadbalancing_backend.py
@@ -244,7 +244,7 @@ def main():
'parameters. Error returned by OVH api was : {0}'
.format(apiError))
- if (backendProperties['weight'] != weight):
+ if backendProperties['weight'] != weight:
# Change weight
try:
client.post(
@@ -263,7 +263,7 @@ def main():
.format(apiError))
moduleChanged = True
- if (backendProperties['probe'] != probe):
+ if backendProperties['probe'] != probe:
# Change probe
backendProperties['probe'] = probe
try:
diff --git a/plugins/modules/ovh_monthly_billing.py b/plugins/modules/ovh_monthly_billing.py
index 438bf7db7f..912b697517 100644
--- a/plugins/modules/ovh_monthly_billing.py
+++ b/plugins/modules/ovh_monthly_billing.py
@@ -98,10 +98,10 @@ def main():
argument_spec=dict(
project_id=dict(required=True),
instance_id=dict(required=True),
- endpoint=dict(required=False),
- application_key=dict(required=False, no_log=True),
- application_secret=dict(required=False, no_log=True),
- consumer_key=dict(required=False, no_log=True),
+ endpoint=dict(),
+ application_key=dict(no_log=True),
+ application_secret=dict(no_log=True),
+ consumer_key=dict(no_log=True),
),
supports_check_mode=True
)
diff --git a/plugins/modules/pacemaker_cluster.py b/plugins/modules/pacemaker_cluster.py
index caf18abb27..ffed13f9c5 100644
--- a/plugins/modules/pacemaker_cluster.py
+++ b/plugins/modules/pacemaker_cluster.py
@@ -13,6 +13,7 @@ module: pacemaker_cluster
short_description: Manage pacemaker clusters
author:
- Mathieu Bultel (@matbu)
+ - Dexter Le (@munchtoast)
description:
- This module can manage a pacemaker cluster and nodes from Ansible using the pacemaker CLI.
extends_documentation_fragment:
@@ -26,18 +27,20 @@ options:
state:
description:
- Indicate desired state of the cluster.
- choices: [cleanup, offline, online, restart]
+ - The value V(maintenance) has been added in community.general 11.1.0.
+ choices: [cleanup, offline, online, restart, maintenance]
type: str
- node:
+ name:
description:
- Specify which node of the cluster you want to manage. V(null) == the cluster status itself, V(all) == check the status
of all nodes.
type: str
+ aliases: ['node']
timeout:
description:
- - Timeout when the module should considered that the action has failed.
- default: 300
+ - Timeout period (in seconds) for polling the cluster operation.
type: int
+ default: 300
force:
description:
- Force the change of the cluster state.
@@ -63,132 +66,104 @@ out:
returned: always
"""
-import time
-
-from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode
-_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node"
+class PacemakerCluster(StateModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ state=dict(type='str', choices=[
+ 'cleanup', 'offline', 'online', 'restart', 'maintenance']),
+ name=dict(type='str', aliases=['node']),
+ timeout=dict(type='int', default=300),
+ force=dict(type='bool', default=True)
+ ),
+ supports_check_mode=True,
+ )
+ default_state = ""
+ def __init_module__(self):
+ self.runner = pacemaker_runner(self.module)
+ self.vars.set('apply_all', True if not self.module.params['name'] else False)
+ get_args = dict([('cli_action', 'cluster'), ('state', 'status'), ('name', None), ('apply_all', self.vars.apply_all)])
+ if self.module.params['state'] == "maintenance":
+ get_args['cli_action'] = "property"
+ get_args['state'] = "config"
+ get_args['name'] = "maintenance-mode"
+ elif self.module.params['state'] == "cleanup":
+ get_args['cli_action'] = "resource"
+ get_args['name'] = self.module.params['name']
-def get_cluster_status(module):
- cmd = ["pcs", "cluster", "status"]
- rc, out, err = module.run_command(cmd)
- if out in _PCS_CLUSTER_DOWN:
- return 'offline'
- else:
- return 'online'
+ self.vars.set('get_args', get_args)
+ self.vars.set('previous_value', self._get()['out'])
+ self.vars.set('value', self.vars.previous_value, change=True, diff=True)
+ if not self.module.params['state']:
+ self.module.deprecate(
+ 'Parameter "state" values not set is being deprecated. Make sure to provide a value for "state"',
+ version='12.0.0',
+ collection_name='community.general'
+ )
-def get_node_status(module, node='all'):
- node_l = ["all"] if node == "all" else []
- cmd = ["pcs", "cluster", "pcsd-status"] + node_l
- rc, out, err = module.run_command(cmd)
- if rc == 1:
- module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
- status = []
- for o in out.splitlines():
- status.append(o.split(':'))
- return status
+ def __quit_module__(self):
+ self.vars.set('value', self._get()['out'])
+ def _process_command_output(self, fail_on_err, ignore_err_msg=""):
+ def process(rc, out, err):
+ if fail_on_err and rc != 0 and err and ignore_err_msg not in err:
+ self.do_raise('pcs failed with error (rc={0}): {1}'.format(rc, err))
+ out = out.rstrip()
+ return None if out == "" else out
+ return process
-def clean_cluster(module, timeout):
- cmd = ["pcs", "resource", "cleanup"]
- rc, out, err = module.run_command(cmd)
- if rc == 1:
- module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+ def _get(self):
+ with self.runner('cli_action state name') as ctx:
+ result = ctx.run(cli_action=self.vars.get_args['cli_action'], state=self.vars.get_args['state'], name=self.vars.get_args['name'])
+ return dict([('rc', result[0]),
+ ('out', result[1] if result[1] != "" else None),
+ ('err', result[2])])
+ def state_cleanup(self):
+ with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='resource')
-def set_cluster(module, state, timeout, force):
- if state == 'online':
- cmd = ["pcs", "cluster", "start"]
- if state == 'offline':
- cmd = ["pcs", "cluster", "stop"]
- if force:
- cmd = cmd + ["--force"]
- rc, out, err = module.run_command(cmd)
- if rc == 1:
- module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+ def state_offline(self):
+ with self.runner('cli_action state name apply_all wait',
+ output_process=self._process_command_output(True, "not currently running"),
+ check_mode_skip=True) as ctx:
+ ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
- t = time.time()
- ready = False
- while time.time() < t + timeout:
- cluster_state = get_cluster_status(module)
- if cluster_state == state:
- ready = True
- break
- if not ready:
- module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+ def state_online(self):
+ with self.runner('cli_action state name apply_all wait',
+ output_process=self._process_command_output(True, "currently running"),
+ check_mode_skip=True) as ctx:
+ ctx.run(cli_action='cluster', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
+
+ if get_pacemaker_maintenance_mode(self.runner):
+ with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false')
+
+ def state_maintenance(self):
+ with self.runner('cli_action state name',
+ output_process=self._process_command_output(True, "Fail"),
+ check_mode_skip=True) as ctx:
+ ctx.run(cli_action='property', name='maintenance-mode=true')
+
+ def state_restart(self):
+ with self.runner('cli_action state name apply_all wait',
+ output_process=self._process_command_output(True, "not currently running"),
+ check_mode_skip=True) as ctx:
+ ctx.run(cli_action='cluster', state='offline', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
+ ctx.run(cli_action='cluster', state='online', apply_all=self.vars.apply_all, wait=self.module.params['timeout'])
+
+ if get_pacemaker_maintenance_mode(self.runner):
+ with self.runner('cli_action state name', output_process=self._process_command_output(True, "Fail"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='property', state='maintenance', name='maintenance-mode=false')
def main():
- argument_spec = dict(
- state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']),
- node=dict(type='str'),
- timeout=dict(type='int', default=300),
- force=dict(type='bool', default=True),
- )
-
- module = AnsibleModule(
- argument_spec,
- supports_check_mode=True,
- )
- changed = False
- state = module.params['state']
- node = module.params['node']
- force = module.params['force']
- timeout = module.params['timeout']
-
- if state in ['online', 'offline']:
- # Get cluster status
- if node is None:
- cluster_state = get_cluster_status(module)
- if cluster_state == state:
- module.exit_json(changed=changed, out=cluster_state)
- else:
- if module.check_mode:
- module.exit_json(changed=True)
- set_cluster(module, state, timeout, force)
- cluster_state = get_cluster_status(module)
- if cluster_state == state:
- module.exit_json(changed=True, out=cluster_state)
- else:
- module.fail_json(msg="Fail to bring the cluster %s" % state)
- else:
- cluster_state = get_node_status(module, node)
- # Check cluster state
- for node_state in cluster_state:
- if node_state[1].strip().lower() == state:
- module.exit_json(changed=changed, out=cluster_state)
- else:
- if module.check_mode:
- module.exit_json(changed=True)
- # Set cluster status if needed
- set_cluster(module, state, timeout, force)
- cluster_state = get_node_status(module, node)
- module.exit_json(changed=True, out=cluster_state)
-
- elif state == 'restart':
- if module.check_mode:
- module.exit_json(changed=True)
- set_cluster(module, 'offline', timeout, force)
- cluster_state = get_cluster_status(module)
- if cluster_state == 'offline':
- set_cluster(module, 'online', timeout, force)
- cluster_state = get_cluster_status(module)
- if cluster_state == 'online':
- module.exit_json(changed=True, out=cluster_state)
- else:
- module.fail_json(msg="Failed during the restart of the cluster, the cluster cannot be started")
- else:
- module.fail_json(msg="Failed during the restart of the cluster, the cluster cannot be stopped")
-
- elif state == 'cleanup':
- if module.check_mode:
- module.exit_json(changed=True)
- clean_cluster(module, timeout)
- cluster_state = get_cluster_status(module)
- module.exit_json(changed=True, out=cluster_state)
+ PacemakerCluster.execute()
if __name__ == '__main__':
diff --git a/plugins/modules/pacemaker_resource.py b/plugins/modules/pacemaker_resource.py
index 187ba6f1f0..2fdf785487 100644
--- a/plugins/modules/pacemaker_resource.py
+++ b/plugins/modules/pacemaker_resource.py
@@ -8,8 +8,7 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = '''
----
+DOCUMENTATION = r"""
module: pacemaker_resource
short_description: Manage pacemaker resources
author:
@@ -28,7 +27,7 @@ options:
state:
description:
- Indicate desired state for cluster resource.
- choices: [ present, absent, enabled, disabled ]
+ choices: [present, absent, enabled, disabled]
default: present
type: str
name:
@@ -89,7 +88,7 @@ options:
description:
- Action to apply to resource.
type: str
- choices: [ clone, master, group, promotable ]
+ choices: [clone, master, group, promotable]
argument_option:
description:
- Options to associate with resource action.
@@ -100,9 +99,9 @@ options:
- Timeout period for polling the resource creation.
type: int
default: 300
-'''
+"""
-EXAMPLES = '''
+EXAMPLES = r"""
---
- name: Create pacemaker resource
hosts: localhost
@@ -124,18 +123,18 @@ EXAMPLES = '''
- operation_action: monitor
operation_option:
- interval=20
-'''
+"""
-RETURN = '''
+RETURN = r"""
cluster_resources:
- description: The cluster resource output message.
- type: str
- sample: "Assumed agent name ocf:heartbeat:IPaddr2 (deduced from IPaddr2)"
- returned: always
-'''
+ description: The cluster resource output message.
+ type: str
+ sample: "Assumed agent name ocf:heartbeat:IPaddr2 (deduced from IPaddr2)"
+ returned: always
+"""
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
-from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner
+from ansible_collections.community.general.plugins.module_utils.pacemaker import pacemaker_runner, get_pacemaker_maintenance_mode
class PacemakerResource(StateModuleHelper):
@@ -164,13 +163,15 @@ class PacemakerResource(StateModuleHelper):
required_if=[('state', 'present', ['resource_type', 'resource_option'])],
supports_check_mode=True,
)
- use_old_vardict = False
- default_state = "present"
def __init_module__(self):
- self.runner = pacemaker_runner(self.module, cli_action='resource')
- self.vars.set('previous_value', self._get())
+ self.runner = pacemaker_runner(self.module)
+ self.vars.set('previous_value', self._get()['out'])
self.vars.set('value', self.vars.previous_value, change=True, diff=True)
+ self.module.params['name'] = self.module.params['name'] or None
+
+ def __quit_module__(self):
+ self.vars.set('value', self._get()['out'])
def _process_command_output(self, fail_on_err, ignore_err_msg=""):
def process(rc, out, err):
@@ -181,43 +182,31 @@ class PacemakerResource(StateModuleHelper):
return process
def _get(self):
- with self.runner('state name', output_process=self._process_command_output(False)) as ctx:
- return ctx.run(state='status')
+ with self.runner('cli_action state name') as ctx:
+ result = ctx.run(cli_action="resource", state='status')
+ return dict([('rc', result[0]),
+ ('out', result[1] if result[1] != "" else None),
+ ('err', result[2])])
def state_absent(self):
- with self.runner('state name', output_process=self._process_command_output(True, "does not exist"), check_mode_skip=True) as ctx:
- ctx.run()
- self.vars.set('value', self._get())
- self.vars.stdout = ctx.results_out
- self.vars.stderr = ctx.results_err
- self.vars.cmd = ctx.cmd
+ force = get_pacemaker_maintenance_mode(self.runner)
+ with self.runner('cli_action state name force', output_process=self._process_command_output(True, "does not exist"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='resource', force=force)
def state_present(self):
with self.runner(
- 'state name resource_type resource_option resource_operation resource_meta resource_argument wait',
- output_process=self._process_command_output(True, "already exists"),
+ 'cli_action state name resource_type resource_option resource_operation resource_meta resource_argument wait',
+ output_process=self._process_command_output(not get_pacemaker_maintenance_mode(self.runner), "already exists"),
check_mode_skip=True) as ctx:
- ctx.run()
- self.vars.set('value', self._get())
- self.vars.stdout = ctx.results_out
- self.vars.stderr = ctx.results_err
- self.vars.cmd = ctx.cmd
+ ctx.run(cli_action='resource')
def state_enabled(self):
- with self.runner('state name', output_process=self._process_command_output(True, "Starting"), check_mode_skip=True) as ctx:
- ctx.run()
- self.vars.set('value', self._get())
- self.vars.stdout = ctx.results_out
- self.vars.stderr = ctx.results_err
- self.vars.cmd = ctx.cmd
+ with self.runner('cli_action state name', output_process=self._process_command_output(True, "Starting"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='resource')
def state_disabled(self):
- with self.runner('state name', output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True) as ctx:
- ctx.run()
- self.vars.set('value', self._get())
- self.vars.stdout = ctx.results_out
- self.vars.stderr = ctx.results_err
- self.vars.cmd = ctx.cmd
+ with self.runner('cli_action state name', output_process=self._process_command_output(True, "Stopped"), check_mode_skip=True) as ctx:
+ ctx.run(cli_action='resource')
def main():
diff --git a/plugins/modules/packet_device.py b/plugins/modules/packet_device.py
index d3746b173f..f17db56c8c 100644
--- a/plugins/modules/packet_device.py
+++ b/plugins/modules/packet_device.py
@@ -111,9 +111,9 @@ options:
state:
description:
- Desired state of the device.
- - If set to V(present) (the default), the module call will return immediately after the device-creating HTTP request
- successfully returns.
- - If set to V(active), the module call will block until all the specified devices are in state active due to the Packet
+ - If set to V(present) (the default), the module call returns immediately after the device-creating HTTP request successfully
+ returns.
+ - If set to V(active), the module call blocks until all the specified devices are in state active due to the Packet
API, or until O(wait_timeout).
choices: [present, absent, active, inactive, rebooted]
default: present
@@ -127,16 +127,16 @@ options:
wait_for_public_IPv:
description:
- Whether to wait for the instance to be assigned a public IPv4/IPv6 address.
- - If set to 4, it will wait until IPv4 is assigned to the instance.
- - If set to 6, wait until public IPv6 is assigned to the instance.
+ - If set to V(4), it waits until IPv4 is assigned to the instance.
+ - If set to V(6), it waits until public IPv6 is assigned to the instance.
choices: [4, 6]
type: int
wait_timeout:
description:
- How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the V(active) state.
- - If O(wait_for_public_IPv) is set and O(state=active), the module will wait for both events consequently, applying
- the timeout twice.
+ - If O(wait_for_public_IPv) is set and O(state=active), the module waits for both events consequently, applying the
+ timeout twice.
default: 900
type: int
@@ -258,26 +258,18 @@ EXAMPLES = r"""
"""
RETURN = r"""
-changed:
- description: True if a device was altered in any way (created, modified or removed).
- type: bool
- sample: true
- returned: success
-
devices:
description: Information about each device that was processed.
type: list
sample:
- - {
- "hostname": "my-server.com",
- "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
- "public_ipv4": "147.229.15.12",
- "private-ipv4": "10.0.15.12",
- "tags": [],
- "locked": false,
- "state": "provisioning",
- "public_ipv6": "2604:1380:2:5200::3"
- }
+ - "hostname": "my-server.com"
+ "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7"
+ "public_ipv4": "147.229.15.12"
+ "private-ipv4": "10.0.15.12"
+ "tags": []
+ "locked": false
+ "state": "provisioning"
+ "public_ipv6": "2604:1380:2:5200::3"
returned: success
"""
@@ -422,12 +414,12 @@ def get_hostname_list(module):
# at this point, hostnames is a list
hostnames = [h.strip() for h in hostnames]
- if (len(hostnames) > 1) and (count > 1):
+ if len(hostnames) > 1 and count > 1:
_msg = ("If you set count>1, you should only specify one hostname "
"with the %d formatter, not a list of hostnames.")
raise Exception(_msg)
- if (len(hostnames) == 1) and (count > 0):
+ if len(hostnames) == 1 and count > 0:
hostname_spec = hostnames[0]
count_range = range(count_offset, count_offset + count)
if re.search(r"%\d{0,2}d", hostname_spec):
diff --git a/plugins/modules/packet_ip_subnet.py b/plugins/modules/packet_ip_subnet.py
index ab74dac840..0029623a10 100644
--- a/plugins/modules/packet_ip_subnet.py
+++ b/plugins/modules/packet_ip_subnet.py
@@ -75,11 +75,11 @@ options:
state:
description:
- Desired state of the IP subnet on the specified device.
- - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR will then be assigned
+ - With O(state=present), you must specify either O(hostname) or O(device_id). Subnet with given CIDR is then assigned
to the specified device.
- - With O(state=absent), you can specify either O(hostname) or O(device_id). The subnet will be removed from specified
+ - With O(state=absent), you can specify either O(hostname) or O(device_id). The subnet is then removed from specified
devices.
- - If you leave both O(hostname) and O(device_id) empty, the subnet will be removed from any device it is assigned to.
+ - If you leave both O(hostname) and O(device_id) empty, the subnet is then removed from any device it is assigned to.
choices: ['present', 'absent']
default: 'present'
type: str
@@ -123,12 +123,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-changed:
- description: True if an IP address assignments were altered in any way (created or removed).
- type: bool
- sample: true
- returned: success
-
device_id:
type: str
description: UUID of the device associated with the specified IP address.
diff --git a/plugins/modules/packet_project.py b/plugins/modules/packet_project.py
index d61c9e598b..afadec36be 100644
--- a/plugins/modules/packet_project.py
+++ b/plugins/modules/packet_project.py
@@ -110,12 +110,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-changed:
- description: True if a project was created or removed.
- type: bool
- sample: true
- returned: success
-
name:
description: Name of addressed project.
type: str
diff --git a/plugins/modules/packet_sshkey.py b/plugins/modules/packet_sshkey.py
index 8172482108..ec76a17b4c 100644
--- a/plugins/modules/packet_sshkey.py
+++ b/plugins/modules/packet_sshkey.py
@@ -35,7 +35,7 @@ options:
type: str
label:
description:
- - Label for the key. If you keep it empty, it will be read from key string.
+ - Label for the key. If you keep it empty, it is read from key string.
type: str
aliases: [name]
id:
@@ -85,15 +85,11 @@ EXAMPLES = r"""
"""
RETURN = r"""
-changed:
- description: True if a sshkey was created or removed.
- type: bool
- sample: true
- returned: always
sshkeys:
- description: Information about sshkeys that were created/removed.
- type: list
- sample: [
+ description: Information about sshkeys that were created/removed.
+ type: list
+ sample:
+ [
{
"fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
"id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
@@ -101,7 +97,7 @@ sshkeys:
"label": "mynewkey33"
}
]
- returned: always
+ returned: always
"""
import os
diff --git a/plugins/modules/pacman.py b/plugins/modules/pacman.py
index 38a98bba60..359cbc51d1 100644
--- a/plugins/modules/pacman.py
+++ b/plugins/modules/pacman.py
@@ -41,9 +41,9 @@ options:
state:
description:
- Whether to install (V(present) or V(installed), V(latest)), or remove (V(absent) or V(removed)) a package.
- - V(present) and V(installed) will simply ensure that a desired package is installed.
- - V(latest) will update the specified package if it is not of the latest available version.
- - V(absent) and V(removed) will remove the specified package.
+ - V(present) and V(installed) simply ensure that a desired package is installed.
+ - V(latest) updates the specified package if it is not of the latest available version.
+ - V(absent) and V(removed) remove the specified package.
default: present
choices: [absent, installed, latest, present, removed]
type: str
@@ -116,20 +116,20 @@ options:
reason_for:
description:
- Set the install reason for V(all) packages or only for V(new) packages.
- - In case of O(state=latest) already installed packages which will be updated to a newer version are not counted as
- V(new).
+ - In case of O(state=latest) already installed packages which are updated to a newer version are not counted as V(new).
default: new
choices: [all, new]
type: str
version_added: 5.4.0
notes:
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly
+ - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly
to the O(name) option.
- To use an AUR helper (O(executable) option), a few extra setup steps might be required beforehand. For example, a dedicated
build user with permissions to install packages could be necessary.
- 'In the tests, while using C(yay) as the O(executable) option, the module failed to install AUR packages with the error:
C(error: target not found: ).'
+ - The common return values RV(ignore:stdout) and RV(ignore:stderr) are returned upon success, when needed, since community.general 4.1.0.
"""
RETURN = r"""
@@ -142,7 +142,7 @@ packages:
returned: success and O(name) is specified or O(upgrade=true)
type: list
elements: str
- sample: [package, other-package]
+ sample: ["package", "other-package"]
cache_updated:
description:
@@ -152,22 +152,6 @@ cache_updated:
type: bool
sample: false
version_added: 4.6.0
-
-stdout:
- description:
- - Output from pacman.
- returned: success, when needed
- type: str
- sample: ":: Synchronizing package databases... core is up to date :: Starting full system upgrade..."
- version_added: 4.1.0
-
-stderr:
- description:
- - Error output from pacman.
- returned: success, when needed
- type: str
- sample: "warning: libtool: local (2.4.6+44+gb9b44533-14) is newer than core (2.4.6+42+gb88cebd5-15)\nwarning ..."
- version_added: 4.1.0
"""
EXAMPLES = r"""
diff --git a/plugins/modules/pacman_key.py b/plugins/modules/pacman_key.py
index f98fb6f8a3..851655f9fc 100644
--- a/plugins/modules/pacman_key.py
+++ b/plugins/modules/pacman_key.py
@@ -18,9 +18,9 @@ description:
- Add or remove gpg keys from the pacman keyring.
notes:
- Use full-length key ID (40 characters).
- - Keys will be verified when using O(data), O(file), or O(url) unless O(verify) is overridden.
- - Keys will be locally signed after being imported into the keyring.
- - If the key ID exists in the keyring, the key will not be added unless O(force_update) is specified.
+ - Keys are verified when using O(data), O(file), or O(url) unless O(verify) is overridden.
+ - Keys are locally signed after being imported into the keyring.
+ - If the key ID exists in the keyring, the key is not added unless O(force_update) is specified.
- O(data), O(file), O(url), and O(keyserver) are mutually exclusive.
requirements:
- gpg
@@ -72,16 +72,22 @@ options:
keyring:
description:
- The full path to the keyring folder on the remote server.
- - If not specified, module will use pacman's default (V(/etc/pacman.d/gnupg)).
+ - If not specified, module uses pacman's default (V(/etc/pacman.d/gnupg)).
- Useful if the remote system requires an alternative gnupg directory.
type: path
default: /etc/pacman.d/gnupg
state:
description:
- - Ensures that the key is present (added) or absent (revoked).
+ - Ensures that the key is V(present) (added) or V(absent) (revoked).
default: present
choices: [absent, present]
type: str
+ ensure_trusted:
+ description:
+ - Ensure that the key is trusted (signed by the Pacman machine key and not expired).
+ type: bool
+ default: false
+ version_added: 11.0.0
"""
EXAMPLES = r"""
@@ -129,12 +135,55 @@ from ansible.module_utils.urls import fetch_url
from ansible.module_utils.common.text.converters import to_native
+class GpgListResult(object):
+ """Wraps gpg --list-* output."""
+
+ def __init__(self, line):
+ self._parts = line.split(':')
+
+ @property
+ def kind(self):
+ return self._parts[0]
+
+ @property
+ def valid(self):
+ return self._parts[1]
+
+ @property
+ def is_fully_valid(self):
+ return self.valid == 'f'
+
+ @property
+ def key(self):
+ return self._parts[4]
+
+ @property
+ def user_id(self):
+ return self._parts[9]
+
+
+def gpg_get_first_attr_of_kind(lines, kind, attr):
+ for line in lines:
+ glr = GpgListResult(line)
+ if glr.kind == kind:
+ return getattr(glr, attr)
+
+
+def gpg_get_all_attrs_of_kind(lines, kind, attr):
+ result = []
+ for line in lines:
+ glr = GpgListResult(line)
+ if glr.kind == kind:
+ result.append(getattr(glr, attr))
+ return result
+
+
class PacmanKey(object):
def __init__(self, module):
self.module = module
# obtain binary paths for gpg & pacman-key
- self.gpg = module.get_bin_path('gpg', required=True)
- self.pacman_key = module.get_bin_path('pacman-key', required=True)
+ self.gpg_binary = module.get_bin_path('gpg', required=True)
+ self.pacman_key_binary = module.get_bin_path('pacman-key', required=True)
# obtain module parameters
keyid = module.params['id']
@@ -146,47 +195,71 @@ class PacmanKey(object):
force_update = module.params['force_update']
keyring = module.params['keyring']
state = module.params['state']
+ ensure_trusted = module.params['ensure_trusted']
self.keylength = 40
# sanitise key ID & check if key exists in the keyring
keyid = self.sanitise_keyid(keyid)
- key_present = self.key_in_keyring(keyring, keyid)
+ key_validity = self.key_validity(keyring, keyid)
+ key_present = len(key_validity) > 0
+ key_valid = any(key_validity)
# check mode
if module.check_mode:
- if state == "present":
+ if state == 'present':
changed = (key_present and force_update) or not key_present
+ if not changed and ensure_trusted:
+ changed = not (key_valid and self.key_is_trusted(keyring, keyid))
module.exit_json(changed=changed)
- elif state == "absent":
- if key_present:
- module.exit_json(changed=True)
- module.exit_json(changed=False)
+ if state == 'absent':
+ module.exit_json(changed=key_present)
- if state == "present":
- if key_present and not force_update:
+ if state == 'present':
+ trusted = key_valid and self.key_is_trusted(keyring, keyid)
+ if not force_update and key_present and (not ensure_trusted or trusted):
module.exit_json(changed=False)
-
+ changed = False
if data:
file = self.save_key(data)
self.add_key(keyring, file, keyid, verify)
- module.exit_json(changed=True)
+ changed = True
elif file:
self.add_key(keyring, file, keyid, verify)
- module.exit_json(changed=True)
+ changed = True
elif url:
data = self.fetch_key(url)
file = self.save_key(data)
self.add_key(keyring, file, keyid, verify)
- module.exit_json(changed=True)
+ changed = True
elif keyserver:
self.recv_key(keyring, keyid, keyserver)
- module.exit_json(changed=True)
- elif state == "absent":
+ changed = True
+ if changed or (ensure_trusted and not trusted):
+ self.lsign_key(keyring=keyring, keyid=keyid)
+ changed = True
+ module.exit_json(changed=changed)
+ elif state == 'absent':
if key_present:
self.remove_key(keyring, keyid)
module.exit_json(changed=True)
module.exit_json(changed=False)
+ def gpg(self, args, keyring=None, **kwargs):
+ cmd = [self.gpg_binary]
+ if keyring:
+ cmd.append('--homedir={keyring}'.format(keyring=keyring))
+ cmd.extend(['--no-permission-warning', '--with-colons', '--quiet', '--batch', '--no-tty'])
+ return self.module.run_command(cmd + args, **kwargs)
+
+ def pacman_key(self, args, keyring, **kwargs):
+ return self.module.run_command(
+ [self.pacman_key_binary, '--gpgdir', keyring] + args,
+ **kwargs)
+
+ def pacman_machine_key(self, keyring):
+ unused_rc, stdout, unused_stderr = self.gpg(['--list-secret-key'], keyring=keyring)
+ return gpg_get_first_attr_of_kind(stdout.splitlines(), 'sec', 'key')
+
def is_hexadecimal(self, string):
"""Check if a given string is valid hexadecimal"""
try:
@@ -216,14 +289,11 @@ class PacmanKey(object):
def recv_key(self, keyring, keyid, keyserver):
"""Receives key via keyserver"""
- cmd = [self.pacman_key, '--gpgdir', keyring, '--keyserver', keyserver, '--recv-keys', keyid]
- self.module.run_command(cmd, check_rc=True)
- self.lsign_key(keyring, keyid)
+ self.pacman_key(['--keyserver', keyserver, '--recv-keys', keyid], keyring=keyring, check_rc=True)
def lsign_key(self, keyring, keyid):
"""Locally sign key"""
- cmd = [self.pacman_key, '--gpgdir', keyring]
- self.module.run_command(cmd + ['--lsign-key', keyid], check_rc=True)
+ self.pacman_key(['--lsign-key', keyid], keyring=keyring, check_rc=True)
def save_key(self, data):
"Saves key data to a temporary file"
@@ -238,14 +308,11 @@ class PacmanKey(object):
"""Add key to pacman's keyring"""
if verify:
self.verify_keyfile(keyfile, keyid)
- cmd = [self.pacman_key, '--gpgdir', keyring, '--add', keyfile]
- self.module.run_command(cmd, check_rc=True)
- self.lsign_key(keyring, keyid)
+ self.pacman_key(['--add', keyfile], keyring=keyring, check_rc=True)
def remove_key(self, keyring, keyid):
"""Remove key from pacman's keyring"""
- cmd = [self.pacman_key, '--gpgdir', keyring, '--delete', keyid]
- self.module.run_command(cmd, check_rc=True)
+ self.pacman_key(['--delete', keyid], keyring=keyring, check_rc=True)
def verify_keyfile(self, keyfile, keyid):
"""Verify that keyfile matches the specified key ID"""
@@ -254,48 +321,29 @@ class PacmanKey(object):
elif keyid is None:
self.module.fail_json(msg="expected a key ID, got none")
- rc, stdout, stderr = self.module.run_command(
- [
- self.gpg,
- '--with-colons',
- '--with-fingerprint',
- '--batch',
- '--no-tty',
- '--show-keys',
- keyfile
- ],
+ rc, stdout, stderr = self.gpg(
+ ['--with-fingerprint', '--show-keys', keyfile],
check_rc=True,
)
- extracted_keyid = None
- for line in stdout.splitlines():
- if line.startswith('fpr:'):
- extracted_keyid = line.split(':')[9]
- break
-
+ extracted_keyid = gpg_get_first_attr_of_kind(stdout.splitlines(), 'fpr', 'user_id')
if extracted_keyid != keyid:
self.module.fail_json(msg="key ID does not match. expected %s, got %s" % (keyid, extracted_keyid))
- def key_in_keyring(self, keyring, keyid):
- "Check if the key ID is in pacman's keyring"
- rc, stdout, stderr = self.module.run_command(
- [
- self.gpg,
- '--with-colons',
- '--batch',
- '--no-tty',
- '--no-default-keyring',
- '--keyring=%s/pubring.gpg' % keyring,
- '--list-keys', keyid
- ],
- check_rc=False,
- )
+ def key_validity(self, keyring, keyid):
+ "Check if the key ID is in pacman's keyring and not expired"
+ rc, stdout, stderr = self.gpg(['--no-default-keyring', '--list-keys', keyid], keyring=keyring, check_rc=False)
if rc != 0:
if stderr.find("No public key") >= 0:
- return False
+ return []
else:
self.module.fail_json(msg="gpg returned an error: %s" % stderr)
- return True
+ return gpg_get_all_attrs_of_kind(stdout.splitlines(), 'uid', 'is_fully_valid')
+
+ def key_is_trusted(self, keyring, keyid):
+ """Check if key is signed and not expired."""
+ unused_rc, stdout, unused_stderr = self.gpg(['--check-signatures', keyid], keyring=keyring)
+ return self.pacman_machine_key(keyring) in gpg_get_all_attrs_of_kind(stdout.splitlines(), 'sig', 'key')
def main():
@@ -309,6 +357,7 @@ def main():
verify=dict(type='bool', default=True),
force_update=dict(type='bool', default=False),
keyring=dict(type='path', default='/etc/pacman.d/gnupg'),
+ ensure_trusted=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
),
supports_check_mode=True,
diff --git a/plugins/modules/pagerduty.py b/plugins/modules/pagerduty.py
index 0c14688dbd..78443e8410 100644
--- a/plugins/modules/pagerduty.py
+++ b/plugins/modules/pagerduty.py
@@ -13,7 +13,7 @@ DOCUMENTATION = r"""
module: pagerduty
short_description: Create PagerDuty maintenance windows
description:
- - This module will let you create PagerDuty maintenance windows.
+ - This module lets you create PagerDuty maintenance windows.
author:
- "Andrew Newdigate (@suprememoocow)"
- "Dylan Silva (@thaumos)"
@@ -79,8 +79,8 @@ options:
default: Created by Ansible
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
"""
@@ -203,7 +203,7 @@ class PagerDutyRequest(object):
return False, json_out, True
def _create_services_payload(self, service):
- if (isinstance(service, list)):
+ if isinstance(service, list):
return [{'id': s, 'type': 'service_reference'} for s in service]
else:
return [{'id': service, 'type': 'service_reference'}]
@@ -242,15 +242,15 @@ def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
- name=dict(required=False),
- user=dict(required=False),
+ name=dict(),
+ user=dict(),
token=dict(required=True, no_log=True),
- service=dict(required=False, type='list', elements='str', aliases=["services"]),
- window_id=dict(required=False),
- requester_id=dict(required=False),
- hours=dict(default='1', required=False), # @TODO change to int?
- minutes=dict(default='0', required=False), # @TODO change to int?
- desc=dict(default='Created by Ansible', required=False),
+ service=dict(type='list', elements='str', aliases=["services"]),
+ window_id=dict(),
+ requester_id=dict(),
+ hours=dict(default='1'), # @TODO change to int?
+ minutes=dict(default='0'), # @TODO change to int?
+ desc=dict(default='Created by Ansible'),
validate_certs=dict(default=True, type='bool'),
)
)
diff --git a/plugins/modules/pagerduty_alert.py b/plugins/modules/pagerduty_alert.py
index 347e849822..e3d93e8718 100644
--- a/plugins/modules/pagerduty_alert.py
+++ b/plugins/modules/pagerduty_alert.py
@@ -12,7 +12,7 @@ DOCUMENTATION = r"""
module: pagerduty_alert
short_description: Trigger, acknowledge or resolve PagerDuty incidents
description:
- - This module will let you trigger, acknowledge or resolve a PagerDuty incident by sending events.
+ - This module lets you trigger, acknowledge or resolve a PagerDuty incident by sending events.
author:
- "Amanpreet Singh (@ApsOps)"
- "Xiao Shen (@xshen1)"
@@ -43,7 +43,7 @@ options:
service_id:
type: str
description:
- - ID of PagerDuty service when incidents will be triggered, acknowledged or resolved.
+ - ID of PagerDuty service when incidents are triggered, acknowledged or resolved.
- Required if O(api_version=v1).
service_key:
type: str
@@ -92,10 +92,9 @@ options:
type: str
description:
- For O(state=triggered) - Required. Short description of the problem that led to this trigger. This field (or a truncated
- version) will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents
- tables in the PagerDuty UI. The maximum length is 1024 characters.
- - For O(state=acknowledged) or O(state=resolved) - Text that will appear in the incident's log associated with this
- event.
+ version) is used when generating phone calls, SMS messages and alert emails. It also appears on the incidents tables
+ in the PagerDuty UI. The maximum length is 1024 characters.
+ - For O(state=acknowledged) or O(state=resolved) - Text that appears in the incident's log associated with this event.
default: Created via Ansible
incident_class:
type: str
@@ -106,12 +105,11 @@ options:
type: str
description:
- Identifies the incident to which this O(state) should be applied.
- - For O(state=triggered) - If there is no open (in other words unresolved) incident with this key, a new one will be
- created. If there is already an open incident with a matching key, this event will be appended to that incident's
- log. The event key provides an easy way to 'de-dup' problem reports. If no O(incident_key) is provided, then it will
- be generated by PagerDuty.
+ - For O(state=triggered) - If there is no open (in other words unresolved) incident with this key, a new one is created.
+ If there is already an open incident with a matching key, this event is appended to that incident's log. The event
+ key provides an easy way to 'de-dup' problem reports. If no O(incident_key) is provided, then it is generated by PagerDuty.
- For O(state=acknowledged) or O(state=resolved) - This should be the incident_key you received back when the incident
- was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
+ was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents are discarded.
link_url:
type: str
description:
diff --git a/plugins/modules/pagerduty_change.py b/plugins/modules/pagerduty_change.py
index 2b63859f1d..de77016969 100644
--- a/plugins/modules/pagerduty_change.py
+++ b/plugins/modules/pagerduty_change.py
@@ -13,8 +13,8 @@ module: pagerduty_change
short_description: Track a code or infrastructure change as a PagerDuty change event
version_added: 1.3.0
description:
- - This module will let you create a PagerDuty change event each time the module is run.
- - This is not an idempotent action and a new change event will be created each time it is run.
+ - This module lets you create a PagerDuty change event each time the module is run.
+ - This is not an idempotent action and a new change event is created each time it is run.
author:
- Adam Vaughan (@adamvaughan)
requirements:
@@ -82,7 +82,7 @@ options:
type: str
validate_certs:
description:
- - If V(false), SSL certificates for the target URL will not be validated. This should only be used on personally controlled
+ - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled
sites using self-signed certificates.
required: false
default: true
@@ -121,15 +121,14 @@ def main():
argument_spec=dict(
integration_key=dict(required=True, type='str', no_log=True),
summary=dict(required=True, type='str'),
- source=dict(required=False, default='Ansible', type='str'),
- user=dict(required=False, type='str'),
- repo=dict(required=False, type='str'),
- revision=dict(required=False, type='str'),
- environment=dict(required=False, type='str'),
- link_url=dict(required=False, type='str'),
- link_text=dict(required=False, type='str'),
- url=dict(required=False,
- default='https://events.pagerduty.com/v2/change/enqueue', type='str'),
+ source=dict(default='Ansible', type='str'),
+ user=dict(type='str'),
+ repo=dict(type='str'),
+ revision=dict(type='str'),
+ environment=dict(type='str'),
+ link_url=dict(type='str'),
+ link_text=dict(type='str'),
+ url=dict(default='https://events.pagerduty.com/v2/change/enqueue', type='str'),
validate_certs=dict(default=True, type='bool')
),
supports_check_mode=True
diff --git a/plugins/modules/pagerduty_user.py b/plugins/modules/pagerduty_user.py
index e03342c792..0830af97f3 100644
--- a/plugins/modules/pagerduty_user.py
+++ b/plugins/modules/pagerduty_user.py
@@ -188,7 +188,7 @@ def main():
state=dict(type='str', default='present', choices=['present', 'absent']),
pd_role=dict(type='str', default='responder',
choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']),
- pd_teams=dict(type='list', elements='str', required=False)),
+ pd_teams=dict(type='list', elements='str')),
required_if=[['state', 'present', ['pd_teams']], ],
supports_check_mode=True,
)
diff --git a/plugins/modules/pam_limits.py b/plugins/modules/pam_limits.py
index d21781ac6c..536ba59662 100644
--- a/plugins/modules/pam_limits.py
+++ b/plugins/modules/pam_limits.py
@@ -80,7 +80,7 @@ options:
default: false
use_min:
description:
- - If set to V(true), the minimal value will be used or conserved.
+ - If set to V(true), the minimal value is used or conserved.
- If the specified value is inferior to the value in the file, file content is replaced with the new value, else content
is not modified.
required: false
@@ -88,7 +88,7 @@ options:
default: false
use_max:
description:
- - If set to V(true), the maximal value will be used or conserved.
+ - If set to V(true), the maximal value is used or conserved.
- If the specified value is superior to the value in the file, file content is replaced with the new value, else content
is not modified.
required: false
@@ -183,7 +183,7 @@ def main():
use_min=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
dest=dict(default=limits_conf, type='str'),
- comment=dict(required=False, default='', type='str')
+ comment=dict(default='', type='str')
),
supports_check_mode=True,
)
diff --git a/plugins/modules/pamd.py b/plugins/modules/pamd.py
index ec2127483e..327316aa37 100644
--- a/plugins/modules/pamd.py
+++ b/plugins/modules/pamd.py
@@ -68,21 +68,20 @@ options:
type: str
module_arguments:
description:
- - When O(state=updated), the O(module_arguments) will replace existing module_arguments.
- - When O(state=args_absent) args matching those listed in O(module_arguments) will be removed.
+ - When O(state=updated), the O(module_arguments) replace existing module_arguments.
+ - When O(state=args_absent) args matching those listed in O(module_arguments) are removed.
- When O(state=args_present) any args listed in O(module_arguments) are added if missing from the existing rule.
- - Furthermore, if the module argument takes a value denoted by C(=), the value will be changed to that specified in
- module_arguments.
+ - Furthermore, if the module argument takes a value denoted by C(=), the value changes to that specified in module_arguments.
type: list
elements: str
state:
description:
- - The default of V(updated) will modify an existing rule if type, control and module_path all match an existing rule.
- - With V(before), the new rule will be inserted before a rule matching type, control and module_path.
- - Similarly, with V(after), the new rule will be inserted after an existing rulematching type, control and module_path.
+ - The default of V(updated) modifies an existing rule if type, control and module_path all match an existing rule.
+ - With V(before), the new rule is inserted before a rule matching type, control and module_path.
+ - Similarly, with V(after), the new rule is inserted after an existing rule matching type, control and module_path.
- With either V(before) or V(after) O(new_type), O(new_control), and O(new_module_path) must all be specified.
- - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) will be ignored.
- - State V(absent) will remove the rule.
+ - If state is V(args_absent) or V(args_present), O(new_type), O(new_control), and O(new_module_path) are ignored.
+ - State V(absent) removes the rule.
type: str
choices: [absent, before, after, args_absent, args_present, updated]
default: updated
diff --git a/plugins/modules/parted.py b/plugins/modules/parted.py
index 98f8f4d647..4bf0897afc 100644
--- a/plugins/modules/parted.py
+++ b/plugins/modules/parted.py
@@ -50,7 +50,7 @@ options:
type: int
unit:
description:
- - Selects the current default unit that Parted will use to display locations and capacities on the disk and to interpret
+ - Selects the current default unit that Parted uses to display locations and capacities on the disk and to interpret
those given by the user if they are not suffixed by an unit.
- When fetching information about a disk, it is recommended to always specify a unit.
type: str
@@ -59,8 +59,7 @@ options:
label:
description:
- Disk label type or partition table to use.
- - If O(device) already contains a different label, it will be changed to O(label) and any previous partitions will be
- lost.
+ - If O(device) already contains a different label, it is changed to O(label) and any previous partitions are lost.
- A O(name) must be specified for a V(gpt) partition table.
type: str
choices: [aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun]
@@ -74,8 +73,8 @@ options:
default: primary
part_start:
description:
- - Where the partition will start as offset from the beginning of the disk, that is, the "distance" from the start of
- the disk. Negative numbers specify distance from the end of the disk.
+ - Where the partition starts as offset from the beginning of the disk, that is, the "distance" from the start of the
+ disk. Negative numbers specify distance from the end of the disk.
- The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for
example V(10GiB), V(15%).
- Using negative values may require setting of O(fs_type) (see notes).
@@ -83,8 +82,8 @@ options:
default: 0%
part_end:
description:
- - Where the partition will end as offset from the beginning of the disk, that is, the "distance" from the start of the
- disk. Negative numbers specify distance from the end of the disk.
+ - Where the partition ends as offset from the beginning of the disk, that is, the "distance" from the start of the disk.
+ Negative numbers specify distance from the end of the disk.
- The distance can be specified with all the units supported by parted (except compat) and it is case sensitive, for
example V(10GiB), V(15%).
type: str
@@ -100,13 +99,13 @@ options:
state:
description:
- Whether to create or delete a partition.
- - If set to V(info) the module will only return the device information.
+ - If set to V(info) the module only returns the device information.
type: str
choices: [absent, present, info]
default: info
fs_type:
description:
- - If specified and the partition does not exist, will set filesystem type to given partition.
+ - If specified and the partition does not exist, sets filesystem type to given partition.
- Parameter optional, but see notes below about negative O(part_start) values.
type: str
version_added: '0.2.0'
@@ -140,35 +139,31 @@ partition_info:
script:
description: Parted script executed by module.
type: str
- sample: {
- "disk": {
- "dev": "/dev/sdb",
- "logical_block": 512,
- "model": "VMware Virtual disk",
- "physical_block": 512,
- "size": 5.0,
- "table": "msdos",
- "unit": "gib"
- },
- "partitions": [{
- "begin": 0.0,
- "end": 1.0,
- "flags": ["boot", "lvm"],
- "fstype": "",
- "name": "",
- "num": 1,
+ sample:
+ "disk":
+ "dev": "/dev/sdb"
+ "logical_block": 512
+ "model": "VMware Virtual disk"
+ "physical_block": 512
+ "size": 5.0
+ "table": "msdos"
+ "unit": "gib"
+ "partitions":
+ - "begin": 0.0
+ "end": 1.0
+ "flags": ["boot", "lvm"]
+ "fstype": ""
+ "name": ""
+ "num": 1
"size": 1.0
- }, {
- "begin": 1.0,
- "end": 5.0,
- "flags": [],
- "fstype": "",
- "name": "",
- "num": 2,
+ - "begin": 1.0
+ "end": 5.0
+ "flags": []
+ "fstype": ""
+ "name": ""
+ "num": 2
"size": 4.0
- }],
- "script": "unit KiB print "
- }
+ "script": "unit KiB print "
"""
EXAMPLES = r"""
diff --git a/plugins/modules/pear.py b/plugins/modules/pear.py
index 05135925bc..5eb84b509d 100644
--- a/plugins/modules/pear.py
+++ b/plugins/modules/pear.py
@@ -47,14 +47,14 @@ options:
description:
- List of regular expressions that can be used to detect prompts during pear package installation to answer the expected
question.
- - Prompts will be processed in the same order as the packages list.
+ - Prompts are processed in the same order as the packages list.
- You can optionally specify an answer to any question in the list.
- - If no answer is provided, the list item will only contain the regular expression.
- - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression:
+ - If no answer is provided, the list item must contain only the regular expression.
+ - "To specify an answer, the item must be a dictionary with the regular expression as key and the answer as value C(my_regular_expression:
'an_answer')."
- You can provide a list containing items with or without answer.
- - A prompt list can be shorter or longer than the packages list but will issue a warning.
- - If you want to specify that a package will not need prompts in the middle of a list, V(null).
+ - A prompt list can be shorter or longer than the packages list but it issues a warning.
+ - If you want to specify that a package does not need prompts in the middle of a list, use V(null).
type: list
elements: raw
version_added: 0.2.0
@@ -293,8 +293,8 @@ def main():
argument_spec=dict(
name=dict(aliases=['pkg'], required=True),
state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
- executable=dict(default=None, required=False, type='path'),
- prompts=dict(default=None, required=False, type='list', elements='raw'),
+ executable=dict(type='path'),
+ prompts=dict(type='list', elements='raw'),
),
supports_check_mode=True)
diff --git a/plugins/modules/pingdom.py b/plugins/modules/pingdom.py
index 192dd244f2..7c82063ab9 100644
--- a/plugins/modules/pingdom.py
+++ b/plugins/modules/pingdom.py
@@ -12,7 +12,7 @@ DOCUMENTATION = r"""
module: pingdom
short_description: Pause/unpause Pingdom alerts
description:
- - This module will let you pause/unpause Pingdom alerts.
+ - This module lets you pause/unpause Pingdom alerts.
author:
- "Dylan Silva (@thaumos)"
- "Justin Johns (!UNKNOWN)"
@@ -132,10 +132,10 @@ def main():
passwd = module.params['passwd']
key = module.params['key']
- if (state == "paused" or state == "stopped"):
+ if state == "paused" or state == "stopped":
(rc, name, result) = pause(checkid, uid, passwd, key)
- if (state == "running" or state == "started"):
+ if state == "running" or state == "started":
(rc, name, result) = unpause(checkid, uid, passwd, key)
if rc != 0:
diff --git a/plugins/modules/pip_package_info.py b/plugins/modules/pip_package_info.py
index 0be9b34fe9..80bdedf7fe 100644
--- a/plugins/modules/pip_package_info.py
+++ b/plugins/modules/pip_package_info.py
@@ -20,8 +20,8 @@ extends_documentation_fragment:
options:
clients:
description:
- - A list of the pip executables that will be used to get the packages. They can be supplied with the full path or just
- the executable name, for example V(pip3.7).
+ - A list of the pip executables that are used to get the packages. They can be supplied with the full path or just the
+ executable name, for example V(pip3.7).
default: ['pip']
required: false
type: list
@@ -59,37 +59,39 @@ packages:
returned: always
type: dict
sample:
- "packages": {
+ {
+ "packages": {
"pip": {
- "Babel": [
- {
- "name": "Babel",
- "source": "pip",
- "version": "2.6.0"
- }
- ],
- "Flask": [
- {
- "name": "Flask",
- "source": "pip",
- "version": "1.0.2"
- }
- ],
- "Flask-SQLAlchemy": [
- {
- "name": "Flask-SQLAlchemy",
- "source": "pip",
- "version": "2.3.2"
- }
- ],
- "Jinja2": [
- {
- "name": "Jinja2",
- "source": "pip",
- "version": "2.10"
- }
- ],
- },
+ "Babel": [
+ {
+ "name": "Babel",
+ "source": "pip",
+ "version": "2.6.0"
+ }
+ ],
+ "Flask": [
+ {
+ "name": "Flask",
+ "source": "pip",
+ "version": "1.0.2"
+ }
+ ],
+ "Flask-SQLAlchemy": [
+ {
+ "name": "Flask-SQLAlchemy",
+ "source": "pip",
+ "version": "2.3.2"
+ }
+ ],
+ "Jinja2": [
+ {
+ "name": "Jinja2",
+ "source": "pip",
+ "version": "2.10"
+ }
+ ]
+ }
+ }
}
"""
diff --git a/plugins/modules/pipx.py b/plugins/modules/pipx.py
index e7806d4e75..778810be0c 100644
--- a/plugins/modules/pipx.py
+++ b/plugins/modules/pipx.py
@@ -54,11 +54,17 @@ options:
name:
type: str
description:
- - The name of the application. In C(pipx) documentation it is also referred to as the name of the virtual environment
- where the application will be installed.
+ - The name of the application and also the name of the Python package being installed.
+ - In C(pipx) documentation it is also referred to as the name of the virtual environment where the application is installed.
- If O(name) is a simple package name without version specifiers, then that name is used as the Python package name
to be installed.
- - Use O(source) for passing package specifications or installing from URLs or directories.
+ - Starting in community.general 10.7.0, you can use package specifiers when O(state=present) or O(state=install). For
+ example, O(name=tox<4.0.0) or O(name=tox>3.0.27).
+ - Please note that when you use O(state=present) and O(name) with version specifiers, contrary to the behavior of C(pipx),
+ this module honors the version specifier and installs a version of the application that satisfies it. If you want
+ to ensure the reinstallation of the application even when the version specifier is met, then you must use O(force=true),
+ or perhaps use O(state=upgrade) instead.
+ - Use O(source) for installing from URLs or directories.
source:
type: str
description:
@@ -69,6 +75,7 @@ options:
- The value of this option is passed as-is to C(pipx).
- O(name) is still required when using O(source) to establish the application name without fetching the package from
a remote source.
+ - The module is not idempotent when using O(source).
install_apps:
description:
- Add apps from the injected packages.
@@ -92,6 +99,7 @@ options:
description:
- Force modification of the application's virtual environment. See C(pipx) for details.
- Only used when O(state=install), O(state=upgrade), O(state=upgrade_all), O(state=latest), or O(state=inject).
+ - The module is not idempotent when O(force=true).
type: bool
default: false
include_injected:
@@ -144,10 +152,10 @@ options:
with O(community.general.pipx_info#module:include_raw=true) and obtaining the content from the RV(community.general.pipx_info#module:raw_output).
type: path
version_added: 9.4.0
-notes:
- - This first implementation does not verify whether a specified version constraint has been installed or not. Hence, when
- using version operators, C(pipx) module will always try to execute the operation, even when the application was previously
- installed. This feature will be added in the future.
+requirements:
+ - When using O(name) with version specifiers, the Python package C(packaging) is required.
+ - If the package C(packaging) is at a version lesser than C(22.0.0), it fails silently when processing invalid specifiers,
+ like C(tox<<<<4.0).
author:
- "Alexei Znamensky (@russoz)"
"""
@@ -167,6 +175,12 @@ EXAMPLES = r"""
name: tox
state: upgrade
+- name: Install or upgrade tox with dependency group 'docs'
+ community.general.pipx:
+ name: tox
+ source: tox[docs]
+ state: latest
+
- name: Reinstall black with specific Python version
community.general.pipx:
name: black
@@ -201,7 +215,9 @@ version:
from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
-from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_list
+from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict
+from ansible_collections.community.general.plugins.module_utils.pkg_req import PackageRequirement
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
from ansible.module_utils.facts.compat import ansible_facts
@@ -255,21 +271,15 @@ class PipX(StateModuleHelper):
),
supports_check_mode=True,
)
- use_old_vardict = False
def _retrieve_installed(self):
- name = _make_name(self.vars.name, self.vars.suffix)
- output_process = make_process_list(self, include_injected=True, name=name)
- installed = self.runner('_list global', output_process=output_process).run()
+ output_process = make_process_dict(include_injected=True)
+ installed, dummy = self.runner('_list global', output_process=output_process).run()
- if name is not None:
- app_list = [app for app in installed if app['name'] == name]
- if app_list:
- return {name: app_list[0]}
- else:
- return {}
+ if self.app_name is None:
+ return installed
- return installed
+ return {k: v for k, v in installed.items() if k == self.app_name}
def __init_module__(self):
if self.vars.executable:
@@ -279,12 +289,20 @@ class PipX(StateModuleHelper):
self.command = [facts['python']['executable'], '-m', 'pipx']
self.runner = pipx_runner(self.module, self.command)
+ pkg_req = PackageRequirement(self.module, self.vars.name)
+ self.parsed_name = pkg_req.parsed_name
+ self.parsed_req = pkg_req.requirement
+ self.app_name = _make_name(self.parsed_name, self.vars.suffix)
+
self.vars.set('application', self._retrieve_installed(), change=True, diff=True)
with self.runner("version") as ctx:
rc, out, err = ctx.run()
self.vars.version = out.strip()
+ if LooseVersion(self.vars.version) < LooseVersion("1.7.0"):
+ self.do_raise("The pipx tool must be at least at version 1.7.0")
+
def __quit_module__(self):
self.vars.application = self._retrieve_installed()
@@ -295,12 +313,27 @@ class PipX(StateModuleHelper):
self.vars.set('run_info', ctx.run_info, verbosity=4)
def state_install(self):
- if not self.vars.application or self.vars.force:
- self.changed = True
- args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source'
- with self.runner(args_order, check_mode_skip=True) as ctx:
- ctx.run(name_source=[self.vars.name, self.vars.source])
- self._capture_results(ctx)
+ # If we have a version spec and no source, use the version spec as source
+ if self.parsed_req and not self.vars.source:
+ self.vars.source = self.vars.name
+
+ if self.vars.application.get(self.app_name):
+ is_installed = True
+ version_match = self.vars.application[self.app_name]['version'] in self.parsed_req.specifier if self.parsed_req else True
+ force = self.vars.force or (not version_match)
+ else:
+ is_installed = False
+ version_match = False
+ force = self.vars.force
+
+ if is_installed and version_match and not force:
+ return
+
+ self.changed = True
+ args_order = 'state global index_url install_deps force python system_site_packages editable pip_args suffix name_source'
+ with self.runner(args_order, check_mode_skip=True) as ctx:
+ ctx.run(name_source=[self.parsed_name, self.vars.source], force=force)
+ self._capture_results(ctx)
state_present = state_install
diff --git a/plugins/modules/pipx_info.py b/plugins/modules/pipx_info.py
index 91d2fdb21c..fc74df496a 100644
--- a/plugins/modules/pipx_info.py
+++ b/plugins/modules/pipx_info.py
@@ -115,7 +115,15 @@ cmd:
returned: success
type: list
elements: str
- sample: ["/usr/bin/python3.10", "-m", "pipx", "list", "--include-injected", "--json"]
+ sample:
+ [
+ "/usr/bin/python3.10",
+ "-m",
+ "pipx",
+ "list",
+ "--include-injected",
+ "--json"
+ ]
version:
description: Version of pipx.
@@ -126,7 +134,8 @@ version:
"""
from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
-from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_list
+from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner, pipx_common_argspec, make_process_dict
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
from ansible.module_utils.facts.compat import ansible_facts
@@ -144,7 +153,6 @@ class PipXInfo(ModuleHelper):
argument_spec=argument_spec,
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
if self.vars.executable:
@@ -157,10 +165,24 @@ class PipXInfo(ModuleHelper):
rc, out, err = ctx.run()
self.vars.version = out.strip()
+ if LooseVersion(self.vars.version) < LooseVersion("1.7.0"):
+ self.do_raise("The pipx tool must be at least at version 1.7.0")
+
def __run__(self):
- output_process = make_process_list(self, **self.vars.as_dict())
+ output_process = make_process_dict(self.vars.include_injected, self.vars.include_deps)
with self.runner('_list global', output_process=output_process) as ctx:
- self.vars.application = ctx.run()
+ applications, raw_data = ctx.run()
+ if self.vars.include_raw:
+ self.vars.raw_output = raw_data
+
+ if self.vars.name:
+ self.vars.application = [
+ v
+ for k, v in applications.items()
+ if k == self.vars.name
+ ]
+ else:
+ self.vars.application = list(applications.values())
self._capture_results(ctx)
def _capture_results(self, ctx):
diff --git a/plugins/modules/pkg5_publisher.py b/plugins/modules/pkg5_publisher.py
index 01c9d48cce..26abded4e2 100644
--- a/plugins/modules/pkg5_publisher.py
+++ b/plugins/modules/pkg5_publisher.py
@@ -16,7 +16,7 @@ author: "Peter Oliver (@mavit)"
short_description: Manages Solaris 11 Image Packaging System publishers
description:
- IPS packages are the native packages in Solaris 11 and higher.
- - This modules will configure which publishers a client will download IPS packages from.
+ - This module configures which publishers a client downloads IPS packages from.
extends_documentation_fragment:
- community.general.attributes
attributes:
diff --git a/plugins/modules/pkgin.py b/plugins/modules/pkgin.py
index 21a3b10016..8695f1b5af 100644
--- a/plugins/modules/pkgin.py
+++ b/plugins/modules/pkgin.py
@@ -26,7 +26,7 @@ author:
- "Shaun Zinck (@szinck)"
- "Jasper Lievisse Adriaanse (@jasperla)"
notes:
- - 'Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package will be silently
+ - 'Known bug with pkgin < 0.8.0: if a package is removed and another package depends on it, the other package is silently
removed as well.'
extends_documentation_fragment:
- community.general.attributes
diff --git a/plugins/modules/pkgng.py b/plugins/modules/pkgng.py
index 582abd3649..58eafb9e0c 100644
--- a/plugins/modules/pkgng.py
+++ b/plugins/modules/pkgng.py
@@ -30,7 +30,7 @@ options:
name:
description:
- Name or list of names of packages to install/remove.
- - With O(name=*), O(state=latest) will operate, but O(state=present) and O(state=absent) will be noops.
+ - With O(name=*), O(state=latest) operates, but O(state=present) and O(state=absent) are noops.
required: true
aliases: [pkg]
type: list
@@ -65,19 +65,19 @@ options:
type: str
rootdir:
description:
- - For C(pkgng) versions 1.5 and later, pkg will install all packages within the specified root directory.
+ - For C(pkgng) versions 1.5 and later, pkg installs all packages within the specified root directory.
- Can not be used together with O(chroot) or O(jail) options.
required: false
type: path
chroot:
description:
- - Pkg will chroot in the specified environment.
+ - Pkg chroots in the specified environment.
- Can not be used together with O(rootdir) or O(jail) options.
required: false
type: path
jail:
description:
- - Pkg will execute in the given jail name or ID.
+ - Pkg executes in the given jail name or ID.
- Can not be used together with O(chroot) or O(rootdir) options.
type: str
autoremove:
@@ -103,8 +103,8 @@ options:
version_added: 9.3.0
author: "bleader (@bleader)"
notes:
- - When using pkgsite, be careful that already in cache packages will not be downloaded again.
- - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly
+ - When using pkgsite, be careful that packages already in the cache are not downloaded again.
+ - When used with a C(loop:) each package is processed individually; it is much more efficient to pass the list directly
to the O(name) option.
"""
@@ -422,17 +422,17 @@ def autoremove_packages(module, run_pkgng):
def main():
module = AnsibleModule(
argument_spec=dict(
- state=dict(default="present", choices=["present", "latest", "absent"], required=False),
+ state=dict(default="present", choices=["present", "latest", "absent"]),
name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
cached=dict(default=False, type='bool'),
- ignore_osver=dict(default=False, required=False, type='bool'),
- annotation=dict(required=False, type='list', elements='str'),
- pkgsite=dict(required=False),
- rootdir=dict(required=False, type='path'),
- chroot=dict(required=False, type='path'),
- jail=dict(required=False, type='str'),
+ ignore_osver=dict(default=False, type='bool'),
+ annotation=dict(type='list', elements='str'),
+ pkgsite=dict(),
+ rootdir=dict(type='path'),
+ chroot=dict(type='path'),
+ jail=dict(type='str'),
autoremove=dict(default=False, type='bool'),
- use_globs=dict(default=True, required=False, type='bool'),
+ use_globs=dict(default=True, type='bool'),
),
supports_check_mode=True,
mutually_exclusive=[["rootdir", "chroot", "jail"]])
diff --git a/plugins/modules/pkgutil.py b/plugins/modules/pkgutil.py
index 7eb18cdb20..a40bff06ec 100644
--- a/plugins/modules/pkgutil.py
+++ b/plugins/modules/pkgutil.py
@@ -17,7 +17,7 @@ module: pkgutil
short_description: OpenCSW package management on Solaris
description:
- This module installs, updates and removes packages from the OpenCSW project for Solaris.
- - Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies.
+ - Unlike the M(community.general.svr4pkg) module, it resolves and downloads dependencies.
- See U(https://www.opencsw.org/) for more information about the project.
author:
- Alexander Winkler (@dermute)
@@ -50,7 +50,7 @@ options:
state:
description:
- Whether to install (V(present)/V(installed)), or remove (V(absent)/V(removed)) packages.
- - The upgrade (V(latest)) operation will update/install the packages to the latest version available.
+ - The upgrade (V(latest)) operation updates/installs the packages to the latest version available.
type: str
required: true
choices: [absent, installed, latest, present, removed]
diff --git a/plugins/modules/pmem.py b/plugins/modules/pmem.py
index 66c1d27033..1555ec842e 100644
--- a/plugins/modules/pmem.py
+++ b/plugins/modules/pmem.py
@@ -46,9 +46,9 @@ options:
type: int
reserved:
description:
- - Percentage of the capacity to reserve (V(0)-V(100)). O(reserved) will not be mapped into the system physical address
- space and will be presented as reserved capacity with Show Device and Show Memory Resources Commands.
- - O(reserved) will be set automatically if this is not configured.
+ - Percentage of the capacity to reserve (V(0)-V(100)). O(reserved) is not mapped into the system physical address space
+ and is presented as reserved capacity with Show Device and Show Memory Resources Commands.
+ - O(reserved) is set automatically if this is not configured.
type: int
required: false
socket:
@@ -147,20 +147,21 @@ result:
namespace:
description: The list of the detail of namespace.
type: list
- sample: [
- {
- "appdirect": 111669149696,
- "memorymode": 970662608896,
- "reserved": 3626500096,
- "socket": 0
- },
- {
- "appdirect": 111669149696,
- "memorymode": 970662608896,
- "reserved": 3626500096,
- "socket": 1
- }
- ]
+ sample:
+ [
+ {
+ "appdirect": 111669149696,
+ "memorymode": 970662608896,
+ "reserved": 3626500096,
+ "socket": 0
+ },
+ {
+ "appdirect": 111669149696,
+ "memorymode": 970662608896,
+ "reserved": 3626500096,
+ "socket": 1
+ }
+ ]
"""
EXAMPLES = r"""
diff --git a/plugins/modules/pnpm.py b/plugins/modules/pnpm.py
index c4dbf55dff..2dad63a608 100644
--- a/plugins/modules/pnpm.py
+++ b/plugins/modules/pnpm.py
@@ -77,14 +77,14 @@ options:
production:
description:
- Install dependencies in production mode.
- - Pnpm will ignore any dependencies under C(devDependencies) in package.json.
+ - Pnpm ignores any dependencies under C(devDependencies) in package.json.
required: false
type: bool
default: false
dev:
description:
- Install dependencies in development mode.
- - Pnpm will ignore any regular dependencies in C(package.json).
+ - Pnpm ignores any regular dependencies in C(package.json).
required: false
default: false
type: bool
@@ -339,11 +339,11 @@ class Pnpm(object):
def main():
arg_spec = dict(
- name=dict(default=None),
- alias=dict(default=None),
- path=dict(default=None, type="path"),
- version=dict(default=None),
- executable=dict(default=None, type="path"),
+ name=dict(),
+ alias=dict(),
+ path=dict(type="path"),
+ version=dict(),
+ executable=dict(type="path"),
ignore_scripts=dict(default=False, type="bool"),
no_optional=dict(default=False, type="bool"),
production=dict(default=False, type="bool"),
diff --git a/plugins/modules/portage.py b/plugins/modules/portage.py
index 4a1cb1b990..8a00b934dd 100644
--- a/plugins/modules/portage.py
+++ b/plugins/modules/portage.py
@@ -193,7 +193,7 @@ options:
quietfail:
description:
- Suppresses display of the build log on stdout (--quiet-fail).
- - Only the die message and the path of the build log will be displayed on stdout.
+ - Only the die message and the path of the build log are displayed on stdout.
type: bool
default: false
@@ -510,13 +510,13 @@ portage_absent_states = ['absent', 'unmerged', 'removed']
def main():
module = AnsibleModule(
argument_spec=dict(
- package=dict(type='list', elements='str', default=None, aliases=['name']),
+ package=dict(type='list', elements='str', aliases=['name']),
state=dict(
default=portage_present_states[0],
choices=portage_present_states + portage_absent_states,
),
update=dict(default=False, type='bool'),
- backtrack=dict(default=None, type='int'),
+ backtrack=dict(type='int'),
deep=dict(default=False, type='bool'),
newuse=dict(default=False, type='bool'),
changed_use=dict(default=False, type='bool'),
@@ -525,18 +525,18 @@ def main():
nodeps=dict(default=False, type='bool'),
onlydeps=dict(default=False, type='bool'),
depclean=dict(default=False, type='bool'),
- select=dict(default=None, type='bool'),
+ select=dict(type='bool'),
quiet=dict(default=False, type='bool'),
verbose=dict(default=False, type='bool'),
- sync=dict(default=None, choices=['yes', 'web', 'no']),
+ sync=dict(choices=['yes', 'web', 'no']),
getbinpkgonly=dict(default=False, type='bool'),
getbinpkg=dict(default=False, type='bool'),
usepkgonly=dict(default=False, type='bool'),
usepkg=dict(default=False, type='bool'),
keepgoing=dict(default=False, type='bool'),
- jobs=dict(default=None, type='int'),
- loadavg=dict(default=None, type='float'),
- withbdeps=dict(default=None, type='bool'),
+ jobs=dict(type='int'),
+ loadavg=dict(type='float'),
+ withbdeps=dict(type='bool'),
quietbuild=dict(default=False, type='bool'),
quietfail=dict(default=False, type='bool'),
),
diff --git a/plugins/modules/pritunl_org.py b/plugins/modules/pritunl_org.py
index a96a68c72e..f87813031b 100644
--- a/plugins/modules/pritunl_org.py
+++ b/plugins/modules/pritunl_org.py
@@ -35,9 +35,9 @@ options:
type: bool
default: false
description:
- - If O(force) is V(true) and O(state) is V(absent), the module will delete the organization, no matter if it contains
- users or not. By default O(force) is V(false), which will cause the module to fail the deletion of the organization
- when it contains users.
+ - If O(force) is V(true) and O(state) is V(absent), the module deletes the organization, no matter if it contains users
+ or not. By default O(force) is V(false), which causes the module to fail the deletion of the organization when it
+ contains users.
state:
type: str
default: 'present'
@@ -63,18 +63,18 @@ EXAMPLES = r"""
RETURN = r"""
response:
- description: JSON representation of a Pritunl Organization.
- returned: success
- type: dict
- sample:
- {
- "auth_api": false,
- "name": "Foo",
- "auth_token": null,
- "user_count": 0,
- "auth_secret": null,
- "id": "csftwlu6uhralzi2dpmhekz3",
- }
+ description: JSON representation of a Pritunl Organization.
+ returned: success
+ type: dict
+ sample:
+ {
+ "auth_api": false,
+ "name": "Foo",
+ "auth_token": null,
+ "user_count": 0,
+ "auth_secret": null,
+ "id": "csftwlu6uhralzi2dpmhekz3"
+ }
"""
@@ -176,10 +176,8 @@ def main():
argument_spec.update(
dict(
name=dict(required=True, type="str", aliases=["org"]),
- force=dict(required=False, type="bool", default=False),
- state=dict(
- required=False, choices=["present", "absent"], default="present"
- ),
+ force=dict(type="bool", default=False),
+ state=dict(choices=["present", "absent"], default="present"),
)
)
diff --git a/plugins/modules/pritunl_org_info.py b/plugins/modules/pritunl_org_info.py
index dc198bc9cc..952acd8963 100644
--- a/plugins/modules/pritunl_org_info.py
+++ b/plugins/modules/pritunl_org_info.py
@@ -27,7 +27,7 @@ options:
- org
default: null
description:
- - Name of the Pritunl organization to search for. If none provided, the module will return all Pritunl organizations.
+ - Name of the Pritunl organization to search for. If none provided, the module returns all Pritunl organizations.
"""
EXAMPLES = r"""
@@ -41,37 +41,37 @@ EXAMPLES = r"""
RETURN = r"""
organizations:
- description: List of Pritunl organizations.
- returned: success
- type: list
- elements: dict
- sample:
- [
- {
- "auth_api": false,
- "name": "FooOrg",
- "auth_token": null,
- "user_count": 0,
- "auth_secret": null,
- "id": "csftwlu6uhralzi2dpmhekz3",
- },
- {
- "auth_api": false,
- "name": "MyOrg",
- "auth_token": null,
- "user_count": 3,
- "auth_secret": null,
- "id": "58070daee63f3b2e6e472c36",
- },
- {
- "auth_api": false,
- "name": "BarOrg",
- "auth_token": null,
- "user_count": 0,
- "auth_secret": null,
- "id": "v1sncsxxybnsylc8gpqg85pg",
- }
- ]
+ description: List of Pritunl organizations.
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ [
+ {
+ "auth_api": false,
+ "name": "FooOrg",
+ "auth_token": null,
+ "user_count": 0,
+ "auth_secret": null,
+ "id": "csftwlu6uhralzi2dpmhekz3"
+ },
+ {
+ "auth_api": false,
+ "name": "MyOrg",
+ "auth_token": null,
+ "user_count": 3,
+ "auth_secret": null,
+ "id": "58070daee63f3b2e6e472c36"
+ },
+ {
+ "auth_api": false,
+ "name": "BarOrg",
+ "auth_token": null,
+ "user_count": 0,
+ "auth_secret": null,
+ "id": "v1sncsxxybnsylc8gpqg85pg"
+ }
+ ]
"""
from ansible.module_utils.basic import AnsibleModule
@@ -113,7 +113,7 @@ def main():
argument_spec.update(
dict(
- organization=dict(required=False, type="str", default=None, aliases=["org"])
+ organization=dict(type="str", aliases=["org"])
)
)
diff --git a/plugins/modules/pritunl_user.py b/plugins/modules/pritunl_user.py
index dd8721d1ba..45de07eba6 100644
--- a/plugins/modules/pritunl_user.py
+++ b/plugins/modules/pritunl_user.py
@@ -115,35 +115,36 @@ EXAMPLES = r"""
RETURN = r"""
response:
- description: JSON representation of Pritunl Users.
- returned: success
- type: dict
- sample:
- {
- "audit": false,
- "auth_type": "google",
- "bypass_secondary": false,
- "client_to_client": false,
- "disabled": false,
- "dns_mapping": null,
- "dns_servers": null,
- "dns_suffix": null,
- "email": "foo@bar.com",
- "gravatar": true,
- "groups": [
- "foo", "bar"
- ],
- "id": "5d070dafe63q3b2e6s472c3b",
- "name": "foo@acme.com",
- "network_links": [],
- "organization": "58070daee6sf342e6e4s2c36",
- "organization_name": "Acme",
- "otp_auth": true,
- "otp_secret": "35H5EJA3XB2$4CWG",
- "pin": false,
- "port_forwarding": [],
- "servers": [],
- }
+ description: JSON representation of Pritunl Users.
+ returned: success
+ type: dict
+ sample:
+ {
+ "audit": false,
+ "auth_type": "google",
+ "bypass_secondary": false,
+ "client_to_client": false,
+ "disabled": false,
+ "dns_mapping": null,
+ "dns_servers": null,
+ "dns_suffix": null,
+ "email": "foo@bar.com",
+ "gravatar": true,
+ "groups": [
+ "foo",
+ "bar"
+ ],
+ "id": "5d070dafe63q3b2e6s472c3b",
+ "name": "foo@acme.com",
+ "network_links": [],
+ "organization": "58070daee6sf342e6e4s2c36",
+ "organization_name": "Acme",
+ "otp_auth": true,
+ "otp_secret": "35H5EJA3XB2$4CWG",
+ "pin": false,
+ "port_forwarding": [],
+ "servers": []
+ }
"""
@@ -319,18 +320,14 @@ def main():
argument_spec.update(
dict(
organization=dict(required=True, type="str", aliases=["org"]),
- state=dict(
- required=False, choices=["present", "absent"], default="present"
- ),
+ state=dict(choices=["present", "absent"], default="present"),
user_name=dict(required=True, type="str"),
- user_type=dict(
- required=False, choices=["client", "server"], default="client"
- ),
- user_email=dict(required=False, type="str", default=None),
- user_groups=dict(required=False, type="list", elements="str", default=None),
- user_disabled=dict(required=False, type="bool", default=None),
- user_gravatar=dict(required=False, type="bool", default=None),
- user_mac_addresses=dict(required=False, type="list", elements="str", default=None),
+ user_type=dict(choices=["client", "server"], default="client"),
+ user_email=dict(type="str"),
+ user_groups=dict(type="list", elements="str"),
+ user_disabled=dict(type="bool"),
+ user_gravatar=dict(type="bool"),
+ user_mac_addresses=dict(type="list", elements="str"),
)
)
diff --git a/plugins/modules/pritunl_user_info.py b/plugins/modules/pritunl_user_info.py
index 02d8512315..2e8180675a 100644
--- a/plugins/modules/pritunl_user_info.py
+++ b/plugins/modules/pritunl_user_info.py
@@ -58,38 +58,39 @@ EXAMPLES = r"""
RETURN = r"""
users:
- description: List of Pritunl users.
- returned: success
- type: list
- elements: dict
- sample:
- [
- {
- "audit": false,
- "auth_type": "google",
- "bypass_secondary": false,
- "client_to_client": false,
- "disabled": false,
- "dns_mapping": null,
- "dns_servers": null,
- "dns_suffix": null,
- "email": "foo@bar.com",
- "gravatar": true,
- "groups": [
- "foo", "bar"
- ],
- "id": "5d070dafe63q3b2e6s472c3b",
- "name": "foo@acme.com",
- "network_links": [],
- "organization": "58070daee6sf342e6e4s2c36",
- "organization_name": "Acme",
- "otp_auth": true,
- "otp_secret": "35H5EJA3XB2$4CWG",
- "pin": false,
- "port_forwarding": [],
- "servers": [],
- }
- ]
+ description: List of Pritunl users.
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ [
+ {
+ "audit": false,
+ "auth_type": "google",
+ "bypass_secondary": false,
+ "client_to_client": false,
+ "disabled": false,
+ "dns_mapping": null,
+ "dns_servers": null,
+ "dns_suffix": null,
+ "email": "foo@bar.com",
+ "gravatar": true,
+ "groups": [
+ "foo",
+ "bar"
+ ],
+ "id": "5d070dafe63q3b2e6s472c3b",
+ "name": "foo@acme.com",
+ "network_links": [],
+ "organization": "58070daee6sf342e6e4s2c36",
+ "organization_name": "Acme",
+ "otp_auth": true,
+ "otp_secret": "35H5EJA3XB2$4CWG",
+ "pin": false,
+ "port_forwarding": [],
+ "servers": []
+ }
+ ]
"""
from ansible.module_utils.basic import AnsibleModule
@@ -150,12 +151,8 @@ def main():
argument_spec.update(
dict(
organization=dict(required=True, type="str", aliases=["org"]),
- user_name=dict(required=False, type="str", default=None),
- user_type=dict(
- required=False,
- choices=["client", "server"],
- default="client",
- ),
+ user_name=dict(type="str"),
+ user_type=dict(choices=["client", "server"], default="client"),
)
)
diff --git a/plugins/modules/profitbricks.py b/plugins/modules/profitbricks.py
deleted file mode 100644
index e72144f759..0000000000
--- a/plugins/modules/profitbricks.py
+++ /dev/null
@@ -1,671 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: profitbricks
-short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine
-description:
- - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it
- can optionally wait for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0.
-deprecated:
- removed_in: 11.0.0
- why: Module relies on library unsupported since 2021.
- alternative: >
- Profitbricks has rebranded as Ionos Cloud and they provide a collection named ionoscloudsdk.ionoscloud.
- Whilst it is likely it will provide the features of this module, that has not been verified.
- Please refer to that collection's documentation for more details.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- auto_increment:
- description:
- - Whether or not to increment a single number in the name for created virtual machines.
- type: bool
- default: true
- name:
- description:
- - The name of the virtual machine.
- type: str
- image:
- description:
- - The system image ID for creating the virtual machine, for example V(a3eae284-a2fe-11e4-b187-5f1f641608c8).
- type: str
- image_password:
- description:
- - Password set for the administrative user.
- type: str
- ssh_keys:
- description:
- - Public SSH keys allowing access to the virtual machine.
- type: list
- elements: str
- default: []
- datacenter:
- description:
- - The datacenter to provision this virtual machine.
- type: str
- cores:
- description:
- - The number of CPU cores to allocate to the virtual machine.
- default: 2
- type: int
- ram:
- description:
- - The amount of memory to allocate to the virtual machine.
- default: 2048
- type: int
- cpu_family:
- description:
- - The CPU family type to allocate to the virtual machine.
- type: str
- default: AMD_OPTERON
- choices: ["AMD_OPTERON", "INTEL_XEON"]
- volume_size:
- description:
- - The size in GB of the boot volume.
- type: int
- default: 10
- bus:
- description:
- - The bus type for the volume.
- type: str
- default: VIRTIO
- choices: ["IDE", "VIRTIO"]
- instance_ids:
- description:
- - List of instance IDs, currently only used when state='absent' to remove instances.
- type: list
- elements: str
- default: []
- count:
- description:
- - The number of virtual machines to create.
- type: int
- default: 1
- location:
- description:
- - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored.
- type: str
- default: us/las
- choices: ["us/las", "de/fra", "de/fkb"]
- assign_public_ip:
- description:
- - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created.
- type: bool
- default: false
- lan:
- description:
- - The ID of the LAN you wish to add the servers to.
- type: int
- default: 1
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable.
- type: str
- subscription_password:
- description:
- - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable.
- type: str
- wait:
- description:
- - Wait for the instance to be in state 'running' before returning.
- type: bool
- default: true
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- type: int
- default: 600
- remove_boot_volume:
- description:
- - Remove the bootVolume of the virtual machine you are destroying.
- type: bool
- default: true
- state:
- description:
- - Create or terminate instances.
- - 'The choices available are: V(running), V(stopped), V(absent), V(present).'
- type: str
- default: 'present'
- disk_type:
- description:
- - The type of disk to be allocated.
- type: str
- choices: [SSD, HDD]
- default: HDD
-
-requirements:
- - "profitbricks"
-author: Matt Baldwin (@baldwinSPC)
-"""
-
-EXAMPLES = r"""
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Provisioning example
-- name: Create three servers and enumerate their names
- community.general.profitbricks:
- datacenter: Tardis One
- name: web%02d.stackpointcloud.com
- cores: 4
- ram: 2048
- volume_size: 50
- cpu_family: INTEL_XEON
- image: a3eae284-a2fe-11e4-b187-5f1f641608c8
- location: us/las
- count: 3
- assign_public_ip: true
-
-- name: Remove virtual machines
- community.general.profitbricks:
- datacenter: Tardis One
- instance_ids:
- - 'web001.stackpointcloud.com'
- - 'web002.stackpointcloud.com'
- - 'web003.stackpointcloud.com'
- wait_timeout: 500
- state: absent
-
-- name: Start virtual machines
- community.general.profitbricks:
- datacenter: Tardis One
- instance_ids:
- - 'web001.stackpointcloud.com'
- - 'web002.stackpointcloud.com'
- - 'web003.stackpointcloud.com'
- wait_timeout: 500
- state: running
-
-- name: Stop virtual machines
- community.general.profitbricks:
- datacenter: Tardis One
- instance_ids:
- - 'web001.stackpointcloud.com'
- - 'web002.stackpointcloud.com'
- - 'web003.stackpointcloud.com'
- wait_timeout: 500
- state: stopped
-"""
-
-import re
-import uuid
-import time
-import traceback
-
-HAS_PB_SDK = True
-
-try:
- from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.common.text.converters import to_native
-
-
-LOCATIONS = ['us/las',
- 'de/fra',
- 'de/fkb']
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request failed to complete ' + msg + ' "' + str(
- promise['requestId']) + '" to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def _create_machine(module, profitbricks, datacenter, name):
- cores = module.params.get('cores')
- ram = module.params.get('ram')
- cpu_family = module.params.get('cpu_family')
- volume_size = module.params.get('volume_size')
- disk_type = module.params.get('disk_type')
- image_password = module.params.get('image_password')
- ssh_keys = module.params.get('ssh_keys')
- bus = module.params.get('bus')
- lan = module.params.get('lan')
- assign_public_ip = module.params.get('assign_public_ip')
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
- location = module.params.get('location')
- image = module.params.get('image')
- assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- if assign_public_ip:
- public_found = False
-
- lans = profitbricks.list_lans(datacenter)
- for lan in lans['items']:
- if lan['properties']['public']:
- public_found = True
- lan = lan['id']
-
- if not public_found:
- i = LAN(
- name='public',
- public=True)
-
- lan_response = profitbricks.create_lan(datacenter, i)
- _wait_for_completion(profitbricks, lan_response,
- wait_timeout, "_create_machine")
- lan = lan_response['id']
-
- v = Volume(
- name=str(uuid.uuid4()).replace('-', '')[:10],
- size=volume_size,
- image=image,
- image_password=image_password,
- ssh_keys=ssh_keys,
- disk_type=disk_type,
- bus=bus)
-
- n = NIC(
- lan=int(lan)
- )
-
- s = Server(
- name=name,
- ram=ram,
- cores=cores,
- cpu_family=cpu_family,
- create_volumes=[v],
- nics=[n],
- )
-
- try:
- create_server_response = profitbricks.create_server(
- datacenter_id=datacenter, server=s)
-
- _wait_for_completion(profitbricks, create_server_response,
- wait_timeout, "create_virtual_machine")
-
- server_response = profitbricks.get_server(
- datacenter_id=datacenter,
- server_id=create_server_response['id'],
- depth=3
- )
- except Exception as e:
- module.fail_json(msg="failed to create the new server: %s" % str(e))
- else:
- return server_response
-
-
-def _startstop_machine(module, profitbricks, datacenter_id, server_id):
- state = module.params.get('state')
-
- try:
- if state == 'running':
- profitbricks.start_server(datacenter_id, server_id)
- else:
- profitbricks.stop_server(datacenter_id, server_id)
-
- return True
- except Exception as e:
- module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e)))
-
-
-def _create_datacenter(module, profitbricks):
- datacenter = module.params.get('datacenter')
- location = module.params.get('location')
- wait_timeout = module.params.get('wait_timeout')
-
- i = Datacenter(
- name=datacenter,
- location=location
- )
-
- try:
- datacenter_response = profitbricks.create_datacenter(datacenter=i)
-
- _wait_for_completion(profitbricks, datacenter_response,
- wait_timeout, "_create_datacenter")
-
- return datacenter_response
- except Exception as e:
- module.fail_json(msg="failed to create the new server(s): %s" % str(e))
-
-
-def create_virtual_machine(module, profitbricks):
- """
- Create new virtual machine
-
- module : AnsibleModule object
- community.general.profitbricks: authenticated profitbricks object
-
- Returns:
- True if a new virtual machine was created, false otherwise
- """
- datacenter = module.params.get('datacenter')
- name = module.params.get('name')
- auto_increment = module.params.get('auto_increment')
- count = module.params.get('count')
- lan = module.params.get('lan')
- wait_timeout = module.params.get('wait_timeout')
- failed = True
- datacenter_found = False
-
- virtual_machines = []
- virtual_machine_ids = []
-
- # Locate UUID for datacenter if referenced by name.
- datacenter_list = profitbricks.list_datacenters()
- datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
- if datacenter_id:
- datacenter_found = True
-
- if not datacenter_found:
- datacenter_response = _create_datacenter(module, profitbricks)
- datacenter_id = datacenter_response['id']
-
- _wait_for_completion(profitbricks, datacenter_response,
- wait_timeout, "create_virtual_machine")
-
- if auto_increment:
- numbers = set()
- count_offset = 1
-
- try:
- name % 0
- except TypeError as e:
- if e.message.startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=e.message, exception=traceback.format_exc())
-
- number_range = xrange(count_offset, count_offset + count + len(numbers))
- available_numbers = list(set(number_range).difference(numbers))
- names = []
- numbers_to_use = available_numbers[:count]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- names = [name]
-
- # Prefetch a list of servers for later comparison.
- server_list = profitbricks.list_servers(datacenter_id)
- for name in names:
- # Skip server creation if the server already exists.
- if _get_server_id(server_list, name):
- continue
-
- create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
- nics = profitbricks.list_nics(datacenter_id, create_response['id'])
- for n in nics['items']:
- if lan == n['properties']['lan']:
- create_response.update({'public_ip': n['properties']['ips'][0]})
-
- virtual_machines.append(create_response)
-
- failed = False
-
- results = {
- 'failed': failed,
- 'machines': virtual_machines,
- 'action': 'create',
- 'instance_ids': {
- 'instances': [i['id'] for i in virtual_machines],
- }
- }
-
- return results
-
-
-def remove_virtual_machine(module, profitbricks):
- """
- Removes a virtual machine.
-
- This will remove the virtual machine along with the bootVolume.
-
- module : AnsibleModule object
- community.general.profitbricks: authenticated profitbricks object.
-
- Not yet supported: handle deletion of attached data disks.
-
- Returns:
- True if a new virtual server was deleted, false otherwise
- """
- datacenter = module.params.get('datacenter')
- instance_ids = module.params.get('instance_ids')
- remove_boot_volume = module.params.get('remove_boot_volume')
- changed = False
-
- if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
- module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
-
- # Locate UUID for datacenter if referenced by name.
- datacenter_list = profitbricks.list_datacenters()
- datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
- if not datacenter_id:
- module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
-
- # Prefetch server list for later comparison.
- server_list = profitbricks.list_servers(datacenter_id)
- for instance in instance_ids:
- # Locate UUID for server if referenced by name.
- server_id = _get_server_id(server_list, instance)
- if server_id:
- # Remove the server's boot volume
- if remove_boot_volume:
- _remove_boot_volume(module, profitbricks, datacenter_id, server_id)
-
- # Remove the server
- try:
- server_response = profitbricks.delete_server(datacenter_id, server_id)
- except Exception as e:
- module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc())
- else:
- changed = True
-
- return changed
-
-
-def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
- """
- Remove the boot volume from the server
- """
- try:
- server = profitbricks.get_server(datacenter_id, server_id)
- volume_id = server['properties']['bootVolume']['id']
- volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
- except Exception as e:
- module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc())
-
-
-def startstop_machine(module, profitbricks, state):
- """
- Starts or Stops a virtual machine.
-
- module : AnsibleModule object
- community.general.profitbricks: authenticated profitbricks object.
-
- Returns:
- True when the servers process the action successfully, false otherwise.
- """
- if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
- module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
-
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- changed = False
-
- datacenter = module.params.get('datacenter')
- instance_ids = module.params.get('instance_ids')
-
- # Locate UUID for datacenter if referenced by name.
- datacenter_list = profitbricks.list_datacenters()
- datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
- if not datacenter_id:
- module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
-
- # Prefetch server list for later comparison.
- server_list = profitbricks.list_servers(datacenter_id)
- for instance in instance_ids:
- # Locate UUID of server if referenced by name.
- server_id = _get_server_id(server_list, instance)
- if server_id:
- _startstop_machine(module, profitbricks, datacenter_id, server_id)
- changed = True
-
- if wait:
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- matched_instances = []
- for res in profitbricks.list_servers(datacenter_id)['items']:
- if state == 'running':
- if res['properties']['vmState'].lower() == state:
- matched_instances.append(res)
- elif state == 'stopped':
- if res['properties']['vmState'].lower() == 'shutoff':
- matched_instances.append(res)
-
- if len(matched_instances) < len(instance_ids):
- time.sleep(5)
- else:
- break
-
- if wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime())
-
- return (changed)
-
-
-def _get_datacenter_id(datacenters, identity):
- """
- Fetch and return datacenter UUID by datacenter name if found.
- """
- for datacenter in datacenters['items']:
- if identity in (datacenter['properties']['name'], datacenter['id']):
- return datacenter['id']
- return None
-
-
-def _get_server_id(servers, identity):
- """
- Fetch and return server UUID by server name if found.
- """
- for server in servers['items']:
- if identity in (server['properties']['name'], server['id']):
- return server['id']
- return None
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(),
- name=dict(),
- image=dict(),
- cores=dict(type='int', default=2),
- ram=dict(type='int', default=2048),
- cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'],
- default='AMD_OPTERON'),
- volume_size=dict(type='int', default=10),
- disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
- image_password=dict(no_log=True),
- ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
- bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
- lan=dict(type='int', default=1),
- count=dict(type='int', default=1),
- auto_increment=dict(type='bool', default=True),
- instance_ids=dict(type='list', elements='str', default=[]),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- location=dict(choices=LOCATIONS, default='us/las'),
- assign_public_ip=dict(type='bool', default=False),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- remove_boot_volume=dict(type='bool', default=True),
- state=dict(default='present'),
- )
- )
-
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required ' +
- 'for running or stopping machines.')
-
- try:
- (changed) = remove_virtual_machine(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
-
- elif state in ('running', 'stopped'):
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required for ' +
- 'running or stopping machines.')
- try:
- (changed) = startstop_machine(module, profitbricks, state)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
-
- elif state == 'present':
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required for new instance')
- if not module.params.get('image'):
- module.fail_json(msg='image parameter is required for new instance')
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is ' +
- 'required for new instance')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is ' +
- 'required for new instance')
-
- try:
- (machine_dict_array) = create_virtual_machine(module, profitbricks)
- module.exit_json(**machine_dict_array)
- except Exception as e:
- module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/profitbricks_datacenter.py b/plugins/modules/profitbricks_datacenter.py
deleted file mode 100644
index 3f9561cb41..0000000000
--- a/plugins/modules/profitbricks_datacenter.py
+++ /dev/null
@@ -1,273 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: profitbricks_datacenter
-short_description: Create or destroy a ProfitBricks Virtual Datacenter
-description:
- - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This
- module has a dependency on profitbricks >= 1.0.0.
-deprecated:
- removed_in: 11.0.0
- why: Module relies on library unsupported since 2021.
- alternative: >
- Profitbricks has rebranded as Ionos Cloud and they provide a collection named ionoscloudsdk.ionoscloud.
- Whilst it is likely it will provide the features of this module, that has not been verified.
- Please refer to that collection's documentation for more details.
-
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- name:
- description:
- - The name of the virtual datacenter.
- type: str
- description:
- description:
- - The description of the virtual datacenter.
- type: str
- required: false
- location:
- description:
- - The datacenter location.
- type: str
- required: false
- default: us/las
- choices: ["us/las", "de/fra", "de/fkb"]
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable.
- type: str
- required: false
- subscription_password:
- description:
- - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable.
- type: str
- required: false
- wait:
- description:
- - Wait for the datacenter to be created before returning.
- required: false
- default: true
- type: bool
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- type: int
- default: 600
- state:
- description:
- - Create or terminate datacenters.
- - 'The available choices are: V(present), V(absent).'
- type: str
- required: false
- default: 'present'
-
-requirements: ["profitbricks"]
-author: Matt Baldwin (@baldwinSPC)
-"""
-
-EXAMPLES = r"""
-- name: Create a datacenter
- community.general.profitbricks_datacenter:
- datacenter: Tardis One
- wait_timeout: 500
-
-- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter)
- community.general.profitbricks_datacenter:
- datacenter: Tardis One
- wait_timeout: 500
- state: absent
-"""
-
-import re
-import time
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService, Datacenter
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-LOCATIONS = ['us/las',
- 'de/fra',
- 'de/fkb']
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request failed to complete ' + msg + ' "' + str(
- promise['requestId']) + '" to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def _remove_datacenter(module, profitbricks, datacenter):
- try:
- profitbricks.delete_datacenter(datacenter)
- except Exception as e:
- module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
-
-
-def create_datacenter(module, profitbricks):
- """
- Creates a Datacenter
-
- This will create a new Datacenter in the specified location.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if a new datacenter was created, false otherwise
- """
- name = module.params.get('name')
- location = module.params.get('location')
- description = module.params.get('description')
- wait = module.params.get('wait')
- wait_timeout = int(module.params.get('wait_timeout'))
-
- i = Datacenter(
- name=name,
- location=location,
- description=description
- )
-
- try:
- datacenter_response = profitbricks.create_datacenter(datacenter=i)
-
- if wait:
- _wait_for_completion(profitbricks, datacenter_response,
- wait_timeout, "_create_datacenter")
-
- results = {
- 'datacenter_id': datacenter_response['id']
- }
-
- return results
-
- except Exception as e:
- module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
-
-
-def remove_datacenter(module, profitbricks):
- """
- Removes a Datacenter.
-
- This will remove a datacenter.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the datacenter was deleted, false otherwise
- """
- name = module.params.get('name')
- changed = False
-
- if uuid_match.match(name):
- _remove_datacenter(module, profitbricks, name)
- changed = True
- else:
- datacenters = profitbricks.list_datacenters()
-
- for d in datacenters['items']:
- vdc = profitbricks.get_datacenter(d['id'])
-
- if name == vdc['properties']['name']:
- name = d['id']
- _remove_datacenter(module, profitbricks, name)
- changed = True
-
- return changed
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(),
- description=dict(),
- location=dict(choices=LOCATIONS, default='us/las'),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(default=600, type='int'),
- state=dict(default='present'), # @TODO add choices
- )
- )
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is required')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is required')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required deleting a virtual datacenter.')
-
- try:
- (changed) = remove_datacenter(module, profitbricks)
- module.exit_json(
- changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set datacenter state: %s' % str(e))
-
- elif state == 'present':
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required for a new datacenter')
- if not module.params.get('location'):
- module.fail_json(msg='location parameter is required for a new datacenter')
-
- try:
- (datacenter_dict_array) = create_datacenter(module, profitbricks)
- module.exit_json(**datacenter_dict_array)
- except Exception as e:
- module.fail_json(msg='failed to set datacenter state: %s' % str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/profitbricks_nic.py b/plugins/modules/profitbricks_nic.py
deleted file mode 100644
index 94d68677d6..0000000000
--- a/plugins/modules/profitbricks_nic.py
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: profitbricks_nic
-short_description: Create or Remove a NIC
-description:
- - This module allows you to create or restore a volume snapshot. This module has a dependency on profitbricks >= 1.0.0.
-deprecated:
- removed_in: 11.0.0
- why: Module relies on library unsupported since 2021.
- alternative: >
- Profitbricks has rebranded as Ionos Cloud and they provide a collection named ionoscloudsdk.ionoscloud.
- Whilst it is likely it will provide the features of this module, that has not been verified.
- Please refer to that collection's documentation for more details.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- datacenter:
- description:
- - The datacenter in which to operate.
- type: str
- required: true
- server:
- description:
- - The server name or ID.
- type: str
- required: true
- name:
- description:
- - The name or ID of the NIC. This is only required on deletes, but not on create.
- - If not specified, it defaults to a value based on UUID4.
- type: str
- lan:
- description:
- - The LAN to place the NIC on. You can pass a LAN that does not exist and it will be created. Required on create.
- type: str
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable.
- type: str
- required: true
- subscription_password:
- description:
- - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable.
- type: str
- required: true
- wait:
- description:
- - Wait for the operation to complete before returning.
- required: false
- default: true
- type: bool
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- type: int
- default: 600
- state:
- description:
- - Indicate desired state of the resource.
- - 'The available choices are: V(present), V(absent).'
- type: str
- required: false
- default: 'present'
-
-requirements: ["profitbricks"]
-author: Matt Baldwin (@baldwinSPC)
-"""
-
-EXAMPLES = r"""
-- name: Create a NIC
- community.general.profitbricks_nic:
- datacenter: Tardis One
- server: node002
- lan: 2
- wait_timeout: 500
- state: present
-
-- name: Remove a NIC
- community.general.profitbricks_nic:
- datacenter: Tardis One
- server: node002
- name: 7341c2454f
- wait_timeout: 500
- state: absent
-"""
-
-import re
-import uuid
-import time
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService, NIC
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _make_default_name():
- return str(uuid.uuid4()).replace('-', '')[:10]
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request failed to complete ' + msg + ' "' + str(
- promise['requestId']) + '" to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def create_nic(module, profitbricks):
- """
- Creates a NIC.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the nic creates, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- lan = module.params.get('lan')
- name = module.params.get('name')
- if name is None:
- name = _make_default_name()
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
- try:
- n = NIC(
- name=name,
- lan=lan
- )
-
- nic_response = profitbricks.create_nic(datacenter, server, n)
-
- if wait:
- _wait_for_completion(profitbricks, nic_response,
- wait_timeout, "create_nic")
-
- return nic_response
-
- except Exception as e:
- module.fail_json(msg="failed to create the NIC: %s" % str(e))
-
-
-def delete_nic(module, profitbricks):
- """
- Removes a NIC
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the NIC was removed, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- name = module.params.get('name')
- if name is None:
- name = _make_default_name()
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- server_found = False
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server_found = True
- server = s['id']
- break
-
- if not server_found:
- return False
-
- # Locate UUID for NIC
- nic_found = False
- if not (uuid_match.match(name)):
- nic_list = profitbricks.list_nics(datacenter, server)
- for n in nic_list['items']:
- if name == n['properties']['name']:
- nic_found = True
- name = n['id']
- break
-
- if not nic_found:
- return False
-
- try:
- nic_response = profitbricks.delete_nic(datacenter, server, name)
- return nic_response
- except Exception as e:
- module.fail_json(msg="failed to remove the NIC: %s" % str(e))
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(required=True),
- server=dict(required=True),
- name=dict(),
- lan=dict(),
- subscription_user=dict(required=True),
- subscription_password=dict(required=True, no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- state=dict(default='present'),
- ),
- required_if=(
- ('state', 'absent', ['name']),
- ('state', 'present', ['lan']),
- )
- )
-
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- try:
- (changed) = delete_nic(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set nic state: %s' % str(e))
-
- elif state == 'present':
- try:
- (nic_dict) = create_nic(module, profitbricks)
- module.exit_json(nics=nic_dict) # @FIXME changed not calculated?
- except Exception as e:
- module.fail_json(msg='failed to set nic state: %s' % str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/profitbricks_volume.py b/plugins/modules/profitbricks_volume.py
deleted file mode 100644
index 6f5b65cd00..0000000000
--- a/plugins/modules/profitbricks_volume.py
+++ /dev/null
@@ -1,448 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: profitbricks_volume
-short_description: Create or destroy a volume
-description:
- - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >=
- 1.0.0.
-deprecated:
- removed_in: 11.0.0
- why: Module relies on library unsupported since 2021.
- alternative: >
- Profitbricks has rebranded as Ionos Cloud and they provide a collection named ionoscloudsdk.ionoscloud.
- Whilst it is likely it will provide the features of this module, that has not been verified.
- Please refer to that collection's documentation for more details.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- datacenter:
- description:
- - The datacenter in which to create the volumes.
- type: str
- name:
- description:
- - The name of the volumes. You can enumerate the names using auto_increment.
- type: str
- size:
- description:
- - The size of the volume.
- type: int
- required: false
- default: 10
- bus:
- description:
- - The bus type.
- type: str
- required: false
- default: VIRTIO
- choices: ["IDE", "VIRTIO"]
- image:
- description:
- - The system image ID for the volume, for example V(a3eae284-a2fe-11e4-b187-5f1f641608c8). This can also be a snapshot
- image ID.
- type: str
- image_password:
- description:
- - Password set for the administrative user.
- type: str
- required: false
- ssh_keys:
- description:
- - Public SSH keys allowing access to the virtual machine.
- type: list
- elements: str
- default: []
- disk_type:
- description:
- - The disk type of the volume.
- type: str
- required: false
- default: HDD
- choices: ["HDD", "SSD"]
- licence_type:
- description:
- - The licence type for the volume. This is used when the image is non-standard.
- - 'The available choices are: V(LINUX), V(WINDOWS), V(UNKNOWN), V(OTHER).'
- type: str
- required: false
- default: UNKNOWN
- count:
- description:
- - The number of volumes you wish to create.
- type: int
- required: false
- default: 1
- auto_increment:
- description:
- - Whether or not to increment a single number in the name for created virtual machines.
- default: true
- type: bool
- instance_ids:
- description:
- - List of instance IDs, currently only used when O(state=absent) to remove instances.
- type: list
- elements: str
- default: []
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable.
- type: str
- required: false
- subscription_password:
- description:
- - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable.
- type: str
- required: false
- wait:
- description:
- - Wait for the datacenter to be created before returning.
- required: false
- default: true
- type: bool
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- type: int
- default: 600
- state:
- description:
- - Create or terminate datacenters.
- - 'The available choices are: V(present), V(absent).'
- type: str
- required: false
- default: 'present'
- server:
- description:
- - Server name to attach the volume to.
- type: str
-
-requirements: ["profitbricks"]
-author: Matt Baldwin (@baldwinSPC)
-"""
-
-EXAMPLES = r"""
-- name: Create multiple volumes
- community.general.profitbricks_volume:
- datacenter: Tardis One
- name: vol%02d
- count: 5
- auto_increment: true
- wait_timeout: 500
- state: present
-
-- name: Remove Volumes
- community.general.profitbricks_volume:
- datacenter: Tardis One
- instance_ids:
- - 'vol01'
- - 'vol02'
- wait_timeout: 500
- state: absent
-"""
-
-import re
-import time
-import traceback
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService, Volume
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.common.text.converters import to_native
-
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request failed to complete ' + msg + ' "' + str(
- promise['requestId']) + '" to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def _create_volume(module, profitbricks, datacenter, name):
- size = module.params.get('size')
- bus = module.params.get('bus')
- image = module.params.get('image')
- image_password = module.params.get('image_password')
- ssh_keys = module.params.get('ssh_keys')
- disk_type = module.params.get('disk_type')
- licence_type = module.params.get('licence_type')
- wait_timeout = module.params.get('wait_timeout')
- wait = module.params.get('wait')
-
- try:
- v = Volume(
- name=name,
- size=size,
- bus=bus,
- image=image,
- image_password=image_password,
- ssh_keys=ssh_keys,
- disk_type=disk_type,
- licence_type=licence_type
- )
-
- volume_response = profitbricks.create_volume(datacenter, v)
-
- if wait:
- _wait_for_completion(profitbricks, volume_response,
- wait_timeout, "_create_volume")
-
- except Exception as e:
- module.fail_json(msg="failed to create the volume: %s" % str(e))
-
- return volume_response
-
-
-def _delete_volume(module, profitbricks, datacenter, volume):
- try:
- profitbricks.delete_volume(datacenter, volume)
- except Exception as e:
- module.fail_json(msg="failed to remove the volume: %s" % str(e))
-
-
-def create_volume(module, profitbricks):
- """
- Creates a volume.
-
- This will create a volume in a datacenter.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was created, false otherwise
- """
- datacenter = module.params.get('datacenter')
- name = module.params.get('name')
- auto_increment = module.params.get('auto_increment')
- count = module.params.get('count')
-
- datacenter_found = False
- failed = True
- volumes = []
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- datacenter_found = True
- break
-
- if not datacenter_found:
- module.fail_json(msg='datacenter could not be found.')
-
- if auto_increment:
- numbers = set()
- count_offset = 1
-
- try:
- name % 0
- except TypeError as e:
- if e.message.startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=e.message, exception=traceback.format_exc())
-
- number_range = xrange(count_offset, count_offset + count + len(numbers))
- available_numbers = list(set(number_range).difference(numbers))
- names = []
- numbers_to_use = available_numbers[:count]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- names = [name] * count
-
- for name in names:
- create_response = _create_volume(module, profitbricks, str(datacenter), name)
- volumes.append(create_response)
- _attach_volume(module, profitbricks, datacenter, create_response['id'])
- failed = False
-
- results = {
- 'failed': failed,
- 'volumes': volumes,
- 'action': 'create',
- 'instance_ids': {
- 'instances': [i['id'] for i in volumes],
- }
- }
-
- return results
-
-
-def delete_volume(module, profitbricks):
- """
- Removes a volume.
-
- This will create a volume in a datacenter.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was removed, false otherwise
- """
- if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
- module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
-
- datacenter = module.params.get('datacenter')
- changed = False
- instance_ids = module.params.get('instance_ids')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- for n in instance_ids:
- if uuid_match.match(n):
- _delete_volume(module, profitbricks, datacenter, n)
- changed = True
- else:
- volumes = profitbricks.list_volumes(datacenter)
- for v in volumes['items']:
- if n == v['properties']['name']:
- volume_id = v['id']
- _delete_volume(module, profitbricks, datacenter, volume_id)
- changed = True
-
- return changed
-
-
-def _attach_volume(module, profitbricks, datacenter, volume):
- """
- Attaches a volume.
-
- This will attach a volume to the server.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was attached, false otherwise
- """
- server = module.params.get('server')
-
- # Locate UUID for Server
- if server:
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
-
- try:
- return profitbricks.attach_volume(datacenter, server, volume)
- except Exception as e:
- module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc())
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(),
- server=dict(),
- name=dict(),
- size=dict(type='int', default=10),
- bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
- image=dict(),
- image_password=dict(no_log=True),
- ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
- disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
- licence_type=dict(default='UNKNOWN'),
- count=dict(type='int', default=1),
- auto_increment=dict(type='bool', default=True),
- instance_ids=dict(type='list', elements='str', default=[]),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- state=dict(default='present'),
- )
- )
-
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is required')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is required')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required for running or stopping machines.')
-
- try:
- (changed) = delete_volume(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
-
- elif state == 'present':
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required for new instance')
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required for new instance')
-
- try:
- (volume_dict_array) = create_volume(module, profitbricks)
- module.exit_json(**volume_dict_array)
- except Exception as e:
- module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/profitbricks_volume_attachments.py b/plugins/modules/profitbricks_volume_attachments.py
deleted file mode 100644
index 8f7d2f1d53..0000000000
--- a/plugins/modules/profitbricks_volume_attachments.py
+++ /dev/null
@@ -1,273 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: profitbricks_volume_attachments
-short_description: Attach or detach a volume
-description:
- - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0.
-deprecated:
- removed_in: 11.0.0
- why: Module relies on library unsupported since 2021.
- alternative: >
- Profitbricks has rebranded as Ionos Cloud and they provide a collection named ionoscloudsdk.ionoscloud.
- Whilst it is likely it will provide the features of this module, that has not been verified.
- Please refer to that collection's documentation for more details.
-extends_documentation_fragment:
- - community.general.attributes
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
-options:
- datacenter:
- description:
- - The datacenter in which to operate.
- type: str
- server:
- description:
- - The name of the server you wish to detach or attach the volume.
- type: str
- volume:
- description:
- - The volume name or ID.
- type: str
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the E(PB_SUBSCRIPTION_ID) environment variable.
- type: str
- required: false
- subscription_password:
- description:
- - THe ProfitBricks password. Overrides the E(PB_PASSWORD) environment variable.
- type: str
- required: false
- wait:
- description:
- - Wait for the operation to complete before returning.
- required: false
- default: true
- type: bool
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- type: int
- default: 600
- state:
- description:
- - Indicate desired state of the resource.
- - 'The available choices are: V(present), V(absent).'
- type: str
- required: false
- default: 'present'
-
-requirements: ["profitbricks"]
-author: Matt Baldwin (@baldwinSPC)
-"""
-
-EXAMPLES = r"""
-- name: Attach a volume
- community.general.profitbricks_volume_attachments:
- datacenter: Tardis One
- server: node002
- volume: vol01
- wait_timeout: 500
- state: present
-
-- name: Detach a volume
- community.general.profitbricks_volume_attachments:
- datacenter: Tardis One
- server: node002
- volume: vol01
- wait_timeout: 500
- state: absent
-"""
-
-import re
-import time
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request failed to complete ' + msg + ' "' + str(
- promise['requestId']) + '" to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def attach_volume(module, profitbricks):
- """
- Attaches a volume.
-
- This will attach a volume to the server.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was attached, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- volume = module.params.get('volume')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
-
- # Locate UUID for Volume
- if not (uuid_match.match(volume)):
- volume_list = profitbricks.list_volumes(datacenter)
- for v in volume_list['items']:
- if volume == v['properties']['name']:
- volume = v['id']
- break
-
- return profitbricks.attach_volume(datacenter, server, volume)
-
-
-def detach_volume(module, profitbricks):
- """
- Detaches a volume.
-
- This will remove a volume from the server.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was detached, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- volume = module.params.get('volume')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
-
- # Locate UUID for Volume
- if not (uuid_match.match(volume)):
- volume_list = profitbricks.list_volumes(datacenter)
- for v in volume_list['items']:
- if volume == v['properties']['name']:
- volume = v['id']
- break
-
- return profitbricks.detach_volume(datacenter, server, volume)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(),
- server=dict(),
- volume=dict(),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- state=dict(default='present'),
- )
- )
-
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is required')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is required')
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required')
- if not module.params.get('server'):
- module.fail_json(msg='server parameter is required')
- if not module.params.get('volume'):
- module.fail_json(msg='volume parameter is required')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- try:
- (changed) = detach_volume(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
- elif state == 'present':
- try:
- attach_volume(module, profitbricks)
- module.exit_json()
- except Exception as e:
- module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox.py b/plugins/modules/proxmox.py
deleted file mode 100644
index 9dd4b13d5c..0000000000
--- a/plugins/modules/proxmox.py
+++ /dev/null
@@ -1,1738 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = r"""
-module: proxmox
-short_description: Management of instances in Proxmox VE cluster
-description:
- - Allows you to create/delete/stop instances in Proxmox VE cluster.
- - The module automatically detects containerization type (lxc for PVE 4, openvz for older).
- - Since community.general 4.0.0 on, there are no more default values.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
- action_group:
- version_added: 9.0.0
-options:
- password:
- description:
- - The instance root password.
- type: str
- hostname:
- description:
- - The instance hostname.
- - Required only for O(state=present).
- - Must be unique if vmid is not passed.
- type: str
- ostemplate:
- description:
- - The template for VM creating.
- - Required only for O(state=present).
- type: str
- disk:
- description:
- - This option was previously described as "hard disk size in GB for instance" however several formats describing a lxc
- mount are permitted.
- - Older versions of Proxmox will accept a numeric value for size using the O(storage) parameter to automatically choose
- which storage to allocate from, however new versions enforce the C(:) syntax.
- - Additional options are available by using some combination of the following key-value pairs as a comma-delimited list
- C([volume=]
- [,acl=<1|0>] [,mountoptions=] [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>]
- [,size=]).
- - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description.
- - This option is mutually exclusive with O(disk_volume).
- type: str
- disk_volume:
- description:
- - Specify a hash/dictionary of the C(rootfs) disk.
- - See U(https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points) for a full description.
- - This option is mutually exclusive with O(storage) and O(disk).
- type: dict
- version_added: 9.2.0
- suboptions:
- storage:
- description:
- - O(disk_volume.storage) is the storage identifier of the storage to use for the C(rootfs).
- - Mutually exclusive with O(disk_volume.host_path).
- type: str
- volume:
- description:
- - O(disk_volume.volume) is the name of an existing volume.
- - If not defined, the module will check if one exists. If not, a new volume will be created.
- - If defined, the volume must exist under that name.
- - Required only if O(disk_volume.storage) is defined, and mutually exclusive with O(disk_volume.host_path).
- type: str
- size:
- description:
- - O(disk_volume.size) is the size of the storage to use.
- - The size is given in GiB.
- - Required only if O(disk_volume.storage) is defined, and mutually exclusive with O(disk_volume.host_path).
- type: int
- host_path:
- description:
- - O(disk_volume.host_path) defines a bind or device path on the PVE host to use for the C(rootfs).
- - Mutually exclusive with O(disk_volume.storage), O(disk_volume.volume), and O(disk_volume.size).
- type: path
- options:
- description:
- - O(disk_volume.options) is a dict of extra options.
- - The value of any given option must be a string, for example V("1").
- type: dict
- cores:
- description:
- - Specify number of cores per socket.
- type: int
- cpus:
- description:
- - Number of allocated cpus for instance.
- type: int
- memory:
- description:
- - Memory size in MB for instance.
- type: int
- swap:
- description:
- - Swap memory size in MB for instance.
- type: int
- netif:
- description:
- - Specifies network interfaces for the container. As a hash/dictionary defining interfaces.
- type: dict
- features:
- description:
- - Specifies a list of features to be enabled. For valid options, see U(https://pve.proxmox.com/wiki/Linux_Container#pct_options).
- - Some features require the use of a privileged container.
- type: list
- elements: str
- version_added: 2.0.0
- startup:
- description:
- - Specifies the startup order of the container.
- - Use C(order=#) where C(#) is a non-negative number to define the general startup order. Shutdown in done with reverse
- ordering.
- - Use C(up=#) where C(#) is in seconds, to specify a delay to wait before the next VM is started.
- - Use C(down=#) where C(#) is in seconds, to specify a delay to wait before the next VM is stopped.
- type: list
- elements: str
- version_added: 8.5.0
- mounts:
- description:
- - Specifies additional mounts (separate disks) for the container. As a hash/dictionary defining mount points as strings.
- - This Option is mutually exclusive with O(mount_volumes).
- type: dict
- mount_volumes:
- description:
- - Specify additional mounts (separate disks) for the container. As a hash/dictionary defining mount points.
- - See U(https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points) for a full description.
- - This Option is mutually exclusive with O(mounts).
- type: list
- elements: dict
- version_added: 9.2.0
- suboptions:
- id:
- description:
- - O(mount_volumes[].id) is the identifier of the mount point written as C(mp[n]).
- type: str
- required: true
- storage:
- description:
- - O(mount_volumes[].storage) is the storage identifier of the storage to use.
- - Mutually exclusive with O(mount_volumes[].host_path).
- type: str
- volume:
- description:
- - O(mount_volumes[].volume) is the name of an existing volume.
- - If not defined, the module will check if one exists. If not, a new volume will be created.
- - If defined, the volume must exist under that name.
- - Required only if O(mount_volumes[].storage) is defined and mutually exclusive with O(mount_volumes[].host_path).
- type: str
- size:
- description:
- - O(mount_volumes[].size) is the size of the storage to use.
- - The size is given in GiB.
- - Required only if O(mount_volumes[].storage) is defined and mutually exclusive with O(mount_volumes[].host_path).
- type: int
- host_path:
- description:
- - O(mount_volumes[].host_path) defines a bind or device path on the PVE host to use for the C(rootfs).
- - Mutually exclusive with O(mount_volumes[].storage), O(mount_volumes[].volume), and O(mount_volumes[].size).
- type: path
- mountpoint:
- description:
- - O(mount_volumes[].mountpoint) is the mount point of the volume.
- type: path
- required: true
- options:
- description:
- - O(mount_volumes[].options) is a dict of extra options.
- - The value of any given option must be a string, for example V("1").
- type: dict
- ip_address:
- description:
- - Specifies the address the container will be assigned.
- type: str
- onboot:
- description:
- - Specifies whether a VM will be started during system bootup.
- type: bool
- storage:
- description:
- - Target storage.
- - This option is mutually exclusive with O(disk_volume) and O(mount_volumes).
- type: str
- default: 'local'
- ostype:
- description:
- - Specifies the C(ostype) of the LXC container.
- - If set to V(auto), no C(ostype) will be provided on instance creation.
- choices: ['auto', 'debian', 'devuan', 'ubuntu', 'centos', 'fedora', 'opensuse', 'archlinux', 'alpine', 'gentoo', 'nixos',
- 'unmanaged']
- type: str
- default: 'auto'
- version_added: 8.1.0
- cpuunits:
- description:
- - CPU weight for a VM.
- type: int
- nameserver:
- description:
- - Sets DNS server IP address for a container.
- type: str
- searchdomain:
- description:
- - Sets DNS search domain for a container.
- type: str
- tags:
- description:
- - List of tags to apply to the container.
- - Tags must start with V([a-z0-9_]) followed by zero or more of the following characters V([a-z0-9_-+.]).
- - Tags are only available in Proxmox 7+.
- type: list
- elements: str
- version_added: 6.2.0
- timeout:
- description:
- - Timeout for operations.
- type: int
- default: 30
- update:
- description:
- - If V(true), the container will be updated with new values.
- - The current default value of V(false) is deprecated and should will change to V(true) in community.general 11.0.0.
- Please set O(update) explicitly to V(false) or V(true) to avoid surprises and get rid of the deprecation warning.
- type: bool
- version_added: 8.1.0
- force:
- description:
- - Forcing operations.
- - Can be used only with states V(present), V(stopped), V(restarted).
- - With O(state=present) force option allow to overwrite existing container.
- - With states V(stopped), V(restarted) allow to force stop instance.
- type: bool
- default: false
- purge:
- description:
- - Remove container from all related configurations.
- - For example backup jobs, replication jobs, or HA.
- - Related ACLs and Firewall entries will always be removed.
- - Used with O(state=absent).
- type: bool
- default: false
- version_added: 2.3.0
- state:
- description:
- - Indicate desired state of the instance.
- - V(template) was added in community.general 8.1.0.
- type: str
- choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'template']
- default: present
- pubkey:
- description:
- - Public key to add to /root/.ssh/authorized_keys. This was added on Proxmox 4.2, it is ignored for earlier versions.
- type: str
- unprivileged:
- description:
- - Indicate if the container should be unprivileged.
- - The default change to V(true) in community.general 7.0.0. It used to be V(false) before.
- type: bool
- default: true
- description:
- description:
- - Specify the description for the container. Only used on the configuration web interface.
- - This is saved as a comment inside the configuration file.
- type: str
- version_added: '0.2.0'
- hookscript:
- description:
- - Script that will be executed during various steps in the containers lifetime.
- type: str
- version_added: '0.2.0'
- timezone:
- description:
- - Timezone used by the container, accepts values like V(Europe/Paris).
- - The special value V(host) configures the same timezone used by Proxmox host.
- type: str
- version_added: '7.1.0'
- clone:
- description:
- - ID of the container to be cloned.
- - O(description), O(hostname), and O(pool) will be copied from the cloned container if not specified.
- - The type of clone created is defined by the O(clone_type) parameter.
- - This operator is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4).
- type: int
- version_added: 4.3.0
- clone_type:
- description:
- - Type of the clone created.
- - V(full) creates a full clone, and O(storage) must be specified.
- - V(linked) creates a linked clone, and the cloned container must be a template container.
- - V(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not.
- O(storage) may be specified, if not it will fall back to the default.
- type: str
- choices: ['full', 'linked', 'opportunistic']
- default: opportunistic
- version_added: 4.3.0
-author: Sergei Antipov (@UnderGreen)
-seealso:
- - module: community.general.proxmox_vm_info
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.proxmox.selection
- - community.general.attributes
-"""
-
-EXAMPLES = r"""
-- name: Create new container with minimal options
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-
-- name: Create new container with minimal options specifying disk storage location and size
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- disk: 'local-lvm:20'
-
-- name: Create new container with minimal options specifying disk storage location and size via disk_volume
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- disk_volume:
- storage: local
- size: 20
-
-- name: Create new container with hookscript and description
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- hookscript: 'local:snippets/vm_hook.sh'
- description: created with ansible
-
-- name: Create new container automatically selecting the next available vmid.
- community.general.proxmox:
- node: 'uk-mc02'
- api_user: 'root@pam'
- api_password: '1q2w3e'
- api_host: 'node1'
- password: '123456'
- hostname: 'example.org'
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-
-- name: Create new container with minimal options with force(it will rewrite existing container)
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- force: true
-
-- name: Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-
-- name: Create new container with minimal options defining network interface with dhcp
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- netif:
- net0: "name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"
-
-- name: Create new container with minimal options defining network interface with static ip
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- netif:
- net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"
-
-- name: Create new container with more options defining network interface with static ip4 and ip6 with vlan-tag and mtu
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- netif:
- net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,ip6=fe80::1227/64,gw6=fe80::1,bridge=vmbr0,firewall=1,tag=934,mtu=1500"
-
-- name: Create new container with minimal options defining a mount with 8GB
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- mounts:
- mp0: "local:8,mp=/mnt/test/"
-
-- name: Create new container with minimal options defining a mount with 8GB using mount_volumes
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- mount_volumes:
- - id: mp0
- storage: local
- size: 8
- mountpoint: /mnt/test
-
-- name: Create new container with minimal options defining a cpu core limit
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- cores: 2
-
-- name: Create new container with minimal options and same timezone as proxmox host
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- timezone: host
-
-- name: Create a new container with nesting enabled and allows the use of CIFS/NFS inside the container.
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- features:
- - nesting=1
- - mount=cifs,nfs
-
-- name: >
- Create a linked clone of the template container with id 100. The newly created container with be a
- linked clone, because no storage parameter is defined
- community.general.proxmox:
- vmid: 201
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- clone: 100
- hostname: clone.example.org
-
-- name: Create a full clone of the container with id 100
- community.general.proxmox:
- vmid: 201
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- clone: 100
- hostname: clone.example.org
- storage: local
-
-- name: Update container configuration
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- netif:
- net0: "name=eth0,gw=192.168.0.1,ip=192.168.0.3/24,bridge=vmbr0"
- update: true
-
-- name: Start container
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: started
-
-- name: >
- Start container with mount. You should enter a 90-second timeout because servers
- with additional disks take longer to boot
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: started
- timeout: 90
-
-- name: Stop container
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: stopped
-
-- name: Stop container with force
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- force: true
- state: stopped
-
-- name: Restart container(stopped or mounted container you can't restart)
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: restarted
-
-- name: Convert container to template
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: template
-
-- name: Convert container to template (stop container if running)
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: template
- force: true
-
-- name: Remove container
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: absent
-"""
-
-import re
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- ProxmoxAnsible,
- ansible_to_proxmox_bool,
- proxmox_auth_argument_spec,
-)
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-
-def get_proxmox_args():
- return dict(
- vmid=dict(type="int", required=False),
- node=dict(),
- pool=dict(),
- password=dict(no_log=True),
- hostname=dict(),
- ostemplate=dict(),
- disk=dict(type="str"),
- disk_volume=dict(
- type="dict",
- options=dict(
- storage=dict(type="str"),
- volume=dict(type="str"),
- size=dict(type="int"),
- host_path=dict(type="path"),
- options=dict(type="dict"),
- ),
- required_together=[("storage", "size")],
- required_by={
- "volume": ("storage", "size"),
- },
- mutually_exclusive=[
- ("host_path", "storage"),
- ("host_path", "volume"),
- ("host_path", "size"),
- ],
- ),
- cores=dict(type="int"),
- cpus=dict(type="int"),
- memory=dict(type="int"),
- swap=dict(type="int"),
- netif=dict(type="dict"),
- mounts=dict(type="dict"),
- mount_volumes=dict(
- type="list",
- elements="dict",
- options=dict(
- id=(dict(type="str", required=True)),
- storage=dict(type="str"),
- volume=dict(type="str"),
- size=dict(type="int"),
- host_path=dict(type="path"),
- mountpoint=dict(type="path", required=True),
- options=dict(type="dict"),
- ),
- required_together=[("storage", "size")],
- required_by={
- "volume": ("storage", "size"),
- },
- mutually_exclusive=[
- ("host_path", "storage"),
- ("host_path", "volume"),
- ("host_path", "size"),
- ],
- ),
- ip_address=dict(),
- ostype=dict(
- default="auto",
- choices=[
- "auto",
- "debian",
- "devuan",
- "ubuntu",
- "centos",
- "fedora",
- "opensuse",
- "archlinux",
- "alpine",
- "gentoo",
- "nixos",
- "unmanaged",
- ],
- ),
- onboot=dict(type="bool"),
- features=dict(type="list", elements="str"),
- startup=dict(type="list", elements="str"),
- storage=dict(default="local"),
- cpuunits=dict(type="int"),
- nameserver=dict(),
- searchdomain=dict(),
- timeout=dict(type="int", default=30),
- update=dict(type="bool"),
- force=dict(type="bool", default=False),
- purge=dict(type="bool", default=False),
- state=dict(
- default="present",
- choices=[
- "present",
- "absent",
- "stopped",
- "started",
- "restarted",
- "template",
- ],
- ),
- pubkey=dict(type="str"),
- unprivileged=dict(type="bool", default=True),
- description=dict(type="str"),
- hookscript=dict(type="str"),
- timezone=dict(type="str"),
- clone=dict(type="int"),
- clone_type=dict(
- default="opportunistic", choices=["full", "linked", "opportunistic"]
- ),
- tags=dict(type="list", elements="str"),
- )
-
-
-def get_ansible_module():
- module_args = proxmox_auth_argument_spec()
- module_args.update(get_proxmox_args())
-
- return AnsibleModule(
- argument_spec=module_args,
- required_if=[
- ("state", "present", ["node", "hostname"]),
- # Require one of clone, ostemplate, or update.
- # Together with mutually_exclusive this ensures that we either
- # clone a container or create a new one from a template file.
- ("state", "present", ("clone", "ostemplate", "update"), True),
- ],
- required_together=[("api_token_id", "api_token_secret")],
- required_one_of=[
- ("api_password", "api_token_id"),
- ("vmid", "hostname"),
- ],
- mutually_exclusive=[
- # Creating a new container is done either by cloning an existing one, or based on a template.
- ("clone", "ostemplate", "update"),
- ("disk", "disk_volume"),
- ("storage", "disk_volume"),
- ("mounts", "mount_volumes"),
- ],
- )
-
-
-class ProxmoxLxcAnsible(ProxmoxAnsible):
- MINIMUM_VERSIONS = {
- "disk_volume": "5.0",
- "mount_volumes": "5.0",
- "tags": "6.1",
- "timezone": "6.3",
- }
-
- def __init__(self, module):
- super(ProxmoxLxcAnsible, self).__init__(module)
-
- self.VZ_TYPE = "openvz" if self.version() < LooseVersion("4.0") else "lxc"
- self.params = self.module.params
-
- def run(self):
- self.check_supported_features()
-
- state = self.params.get("state")
-
- vmid = self.params.get("vmid")
- hostname = self.params.get("hostname")
-
- if not vmid and not hostname:
- self.module.fail_json(msg="Either VMID or hostname must be provided.")
-
- if state == "present":
- self.lxc_present(
- vmid,
- hostname,
- node=self.params.get("node"),
- update=self.params.get("update"),
- force=self.params.get("force"),
- )
- elif state == "absent":
- self.lxc_absent(
- vmid,
- hostname,
- node=self.params.get("node"),
- timeout=self.params.get("timeout"),
- purge=self.params.get("purge"),
- )
- elif state == "started":
- self.lxc_started(
- vmid,
- hostname,
- node=self.params.get("node"),
- timeout=self.params.get("timeout"),
- )
- elif state == "stopped":
- self.lxc_stopped(
- vmid,
- hostname,
- node=self.params.get("node"),
- timeout=self.params.get("timeout"),
- force=self.params.get("force"),
- )
- elif state == "restarted":
- self.lxc_restarted(
- vmid,
- hostname,
- node=self.params.get("node"),
- timeout=self.params.get("timeout"),
- force=self.params.get("force"),
- )
- elif state == "template":
- self.lxc_to_template(
- vmid,
- hostname,
- node=self.params.get("node"),
- timeout=self.params.get("timeout"),
- force=self.params.get("force"),
- )
-
- def lxc_present(self, vmid, hostname, node, update, force):
- try:
- lxc = self.get_lxc_resource(vmid, hostname)
- vmid = vmid or lxc["id"].split("/")[-1]
- node = node or lxc["node"]
- except LookupError:
- lxc = None
- vmid = vmid or self.get_nextvmid()
-
- if node is None:
- raise ValueError(
- "Argument 'node' is None, but should be found from VMID/hostname or provided."
- )
-
- # check if the container exists already
- if lxc is not None:
- if update is None:
- # TODO: Remove deprecation warning in version 11.0.0
- self.module.deprecate(
- msg="The default value of false for 'update' has been deprecated and will be changed to true in version 11.0.0.",
- version="11.0.0",
- collection_name="community.general",
- )
- update = False
-
- if update:
- # Update it if we should
- identifier = self.format_vm_identifier(vmid, hostname)
- self.update_lxc_instance(
- vmid,
- node,
- cores=self.params.get("cores"),
- cpus=self.params.get("cpus"),
- cpuunits=self.params.get("cpuunits"),
- description=self.params.get("description"),
- disk=self.params.get("disk"),
- disk_volume=self.params.get("disk_volume"),
- features=self.params.get("features"),
- hookscript=self.params.get("hookscript"),
- hostname=self.params.get("hostname"),
- ip_address=self.params.get("ip_address"),
- memory=self.params.get("memory"),
- mounts=self.params.get("mounts"),
- mount_volumes=self.params.get("mount_volumes"),
- nameserver=self.params.get("nameserver"),
- netif=self.params.get("netif"),
- onboot=ansible_to_proxmox_bool(self.params.get("onboot")),
- searchdomain=self.params.get("searchdomain"),
- startup=self.params.get("startup"),
- swap=self.params.get("swap"),
- tags=self.params.get("tags"),
- timezone=self.params.get("timezone"),
- )
- self.module.exit_json(
- changed=True, vmid=vmid, msg="VM %s has been updated." % identifier
- )
- elif not force:
- # We're done if it shouldn't be forcefully created
- identifier = self.format_vm_identifier(vmid, lxc["name"])
- self.module.exit_json(
- changed=False, vmid=vmid, msg="VM %s already exists." % identifier
- )
- self.module.debug(
- "VM %s already exists, but we don't update and instead forcefully recreate it."
- % identifier
- )
-
- self.new_lxc_instance(
- vmid,
- hostname,
- node=self.params.get("node"),
- clone_from=self.params.get("clone"),
- ostemplate=self.params.get("ostemplate"),
- force=force,
- )
-
- def lxc_absent(self, vmid, hostname, node, timeout, purge):
- try:
- lxc = self.get_lxc_resource(vmid, hostname)
- except LookupError:
- identifier = self.format_vm_identifier(vmid, hostname)
- self.module.exit_json(
- changed=False, vmid=vmid, msg="VM %s is already absent." % (identifier)
- )
-
- vmid = vmid or lxc["id"].split("/")[-1]
- node = node or lxc["node"]
-
- lxc_status = self.get_lxc_status(vmid, node)
- identifier = self.format_vm_identifier(vmid, hostname)
-
- if lxc_status == "running":
- self.module.exit_json(
- changed=False,
- vmid=vmid,
- msg="VM %s is running. Stop it before deletion." % identifier,
- )
- if lxc_status == "mounted":
- self.module.exit_json(
- changed=False,
- vmid=vmid,
- msg="VM %s is mounted. Stop it with force option before deletion."
- % identifier,
- )
-
- self.remove_lxc_instance(vmid, node, timeout, purge)
- self.module.exit_json(
- changed=True, vmid=vmid, msg="VM %s removed." % identifier
- )
-
- def lxc_started(self, vmid, hostname, node, timeout):
- lxc = self.get_lxc_resource(vmid, hostname)
- vmid = vmid or lxc["id"].split("/")[-1]
- hostname = hostname or lxc["name"]
- identifier = self.format_vm_identifier(vmid, hostname)
- node = node or lxc["node"]
- lxc_status = self.get_lxc_status(vmid, lxc["node"])
-
- if lxc_status == "running":
- self.module.exit_json(
- changed=False, vmid=vmid, msg="VM %s is already running." % identifier
- )
-
- self.start_lxc_instance(vmid, node, timeout)
- self.module.exit_json(
- changed=True, vmid=vmid, msg="VM %s started." % identifier
- )
-
- def lxc_stopped(self, vmid, hostname, node, timeout, force):
- lxc = self.get_lxc_resource(vmid, hostname)
- vmid = vmid or lxc["id"].split("/")[-1]
- hostname = hostname or lxc["name"]
- identifier = self.format_vm_identifier(vmid, hostname)
- node = node or lxc["node"]
- lxc_status = self.get_lxc_status(vmid, node)
-
- if lxc_status == "mounted":
- if force:
- self.umount_lxc_instance(vmid, hostname, timeout)
- else:
- self.module.exit_json(
- changed=False,
- vmid=vmid,
- msg="VM %s is already stopped, but mounted. Use force option to umount it."
- % identifier,
- )
-
- if lxc_status == "stopped":
- self.module.exit_json(
- changed=False, vmid=vmid, msg="VM %s is already stopped." % identifier
- )
-
- self.stop_lxc_instance(vmid, node, timeout, force)
- self.module.exit_json(
- changed=True, vmid=vmid, msg="VM %s stopped." % identifier
- )
-
- def lxc_restarted(self, vmid, hostname, node, timeout, force):
- lxc = self.get_lxc_resource(vmid, hostname)
-
- vmid = vmid or lxc["id"].split("/")[-1]
- hostname = hostname or lxc["name"]
- node = node or lxc["node"]
-
- identifier = self.format_vm_identifier(vmid, hostname)
- lxc_status = self.get_lxc_status(vmid, node)
-
- if lxc_status in ["stopped", "mounted"]:
- self.module.exit_json(
- changed=False, vmid=vmid, msg="VM %s is not running." % identifier
- )
-
- self.stop_lxc_instance(vmid, node, timeout, force)
- self.start_lxc_instance(vmid, node, timeout)
- self.module.exit_json(
- changed=True, vmid=vmid, msg="VM %s is restarted." % identifier
- )
-
- def lxc_to_template(self, vmid, hostname, node, timeout, force):
- lxc = self.get_lxc_resource(vmid, hostname)
- vmid = vmid or lxc["id"].split("/")[-1]
- hostname = hostname or lxc["name"]
- node = node or lxc["node"]
- identifier = self.format_vm_identifier(vmid, hostname)
-
- if self.is_template_container(node, vmid):
- self.module.exit_json(
- changed=False,
- vmid=vmid,
- msg="VM %s is already a template." % identifier,
- )
-
- lxc_status = self.get_lxc_status(vmid, node)
- if lxc_status == "running" and force:
- self.stop_instance(vmid, hostname, node, timeout, force)
-
- proxmox_node = self.proxmox_api.nodes(node)
- getattr(proxmox_node, self.VZ_TYPE)(vmid).template.post()
- self.module.exit_json(
- changed=True, vmid=vmid, msg="VM %s converted to template." % identifier
- )
-
- def update_lxc_instance(self, vmid, node, **kwargs):
- if self.VZ_TYPE != "lxc":
- self.module.fail_json(
- msg="Updating LXC containers is only supported for LXC-enabled clusters in PVE 4.0 and above."
- )
-
- kwargs = {k: v for k, v in kwargs.items() if v is not None}
-
- self.validate_tags(kwargs.get("tags", []))
-
- if "features" in kwargs:
- kwargs["features"] = ",".join(kwargs.pop("features"))
- if "startup" in kwargs:
- kwargs["startup"] = ",".join(kwargs.pop("startup"))
-
- disk_updates = self.process_disk_keys(
- vmid,
- node,
- kwargs.pop("disk", None),
- kwargs.pop("disk_volume", None),
- )
- mounts_updates = self.process_mount_keys(
- vmid,
- node,
- kwargs.pop("mounts", None),
- kwargs.pop("mount_volumes", None),
- )
- kwargs.update(disk_updates)
- kwargs.update(mounts_updates)
-
- if "cpus" in kwargs:
- kwargs["cpulimit"] = kwargs.pop("cpus")
- if "netif" in kwargs:
- kwargs.update(kwargs.pop("netif"))
-
- if "pubkey" in kwargs:
- pubkey = kwargs.pop("pubkey")
- if self.version() >= LooseVersion("4.2"):
- kwargs["ssh-public-keys"] = pubkey
- else:
- self.module.warn(
- "'pubkey' is not supported for PVE 4.1 and below. Ignoring keyword."
- )
-
- # fetch current config
- proxmox_node = self.proxmox_api.nodes(node)
- current_config = getattr(proxmox_node, self.VZ_TYPE)(vmid).config.get()
-
- # create diff between the current and requested config
- diff = {}
- for arg, value in kwargs.items():
- # if the arg isn't in the current config, it needs to be added
- if arg not in current_config:
- diff[arg] = value
- elif isinstance(value, str):
- # compare all string values as lists as some of them may be lists separated by commas. order doesn't matter
- current_values = current_config[arg].split(",")
- requested_values = value.split(",")
- for new_value in requested_values:
- if new_value not in current_values:
- diff[arg] = value
- break
- # if it's not a list (or string) just compare the values
- # some types don't match with the API, so force a string comparison
- elif str(value) != str(current_config[arg]):
- diff[arg] = value
-
- if not diff:
- self.module.exit_json(
- changed=False, vmid=vmid, msg="Container config is already up to date."
- )
-
- # update the config
- getattr(proxmox_node, self.VZ_TYPE)(vmid).config.put(
- vmid=vmid, node=node, **kwargs
- )
-
- def new_lxc_instance(self, vmid, hostname, node, clone_from, ostemplate, force):
- identifier = self.format_vm_identifier(vmid, hostname)
-
- if clone_from is not None:
- self.clone_lxc_instance(
- vmid,
- node,
- clone_from,
- clone_type=self.params.get("clone_type"),
- timeout=self.params.get("timeout"),
- description=self.params.get("description"),
- hostname=hostname,
- pool=self.params.get("pool"),
- storage=self.params.get("storage"),
- )
- self.module.exit_json(
- changed=True,
- vmid=vmid,
- msg="Cloned VM %s from %d" % (identifier, clone_from),
- )
-
- if ostemplate is not None:
- self.create_lxc_instance(
- vmid,
- node,
- ostemplate,
- timeout=self.params.get("timeout"),
- cores=self.params.get("cores"),
- cpus=self.params.get("cpus"),
- cpuunits=self.params.get("cpuunits"),
- description=self.params.get("description"),
- disk=self.params.get("disk"),
- disk_volume=self.params.get("disk_volume"),
- features=self.params.get("features"),
- force=ansible_to_proxmox_bool(force),
- hookscript=self.params.get("hookscript"),
- hostname=hostname,
- ip_address=self.params.get("ip_address"),
- memory=self.params.get("memory"),
- mounts=self.params.get("mounts"),
- mount_volumes=self.params.get("mount_volumes"),
- nameserver=self.params.get("nameserver"),
- netif=self.params.get("netif"),
- onboot=ansible_to_proxmox_bool(self.params.get("onboot")),
- ostype=self.params.get("ostype"),
- password=self.params.get("password"),
- pool=self.params.get("pool"),
- pubkey=self.params.get("pubkey"),
- searchdomain=self.params.get("searchdomain"),
- startup=self.params.get("startup"),
- storage=self.params.get("storage"),
- swap=self.params.get("swap"),
- tags=self.params.get("tags"),
- timezone=self.params.get("timezone"),
- unprivileged=ansible_to_proxmox_bool(self.params.get("unprivileged")),
- )
- self.module.exit_json(
- changed=True,
- vmid=vmid,
- msg="Created VM %s from template %s" % (identifier, ostemplate),
- )
-
- self.module.fail_json(
- vmid=vmid,
- msg="VM %s does not exist but neither clone nor ostemplate were specified!"
- % identifier,
- )
-
- def create_lxc_instance(self, vmid, node, ostemplate, timeout, **kwargs):
- template_store = ostemplate.split(":")[0]
- if not self.content_check(node, ostemplate, template_store):
- self.module.fail_json(
- vmid=vmid,
- msg="ostemplate %s does not exist on node %s and storage %s."
- % (ostemplate, node, template_store),
- )
-
- disk_updates = self.process_disk_keys(
- vmid,
- node,
- kwargs.pop("disk"),
- kwargs.pop("disk_volume"),
- )
- mounts_updates = self.process_mount_keys(
- vmid,
- node,
- kwargs.pop("mounts"),
- kwargs.pop("mount_volumes"),
- )
- kwargs.update(disk_updates)
- kwargs.update(mounts_updates)
-
- # Remove empty values from kwargs
- kwargs = {k: v for k, v in kwargs.items() if v is not None}
-
- if "features" in kwargs:
- kwargs["features"] = ",".join(kwargs.pop("features"))
-
- if "startup" in kwargs:
- kwargs["startup"] = ",".join(kwargs.pop("startup"))
-
- self.validate_tags(kwargs.get("tags", []))
-
- if self.VZ_TYPE == "lxc":
- if "cpus" in kwargs:
- kwargs["cpuunits"] = kwargs.pop("cpus")
- kwargs.update(kwargs.pop("netif", {}))
- else:
- if "mount_volumes" in kwargs:
- kwargs.pop("mount_volumes")
- self.module.warn(
- "'mount_volumes' is not supported for non-LXC clusters. Ignoring keyword."
- )
-
- if "pubkey" in kwargs:
- pubkey = kwargs.pop("pubkey")
- if self.version() >= LooseVersion("4.2"):
- kwargs["ssh-public-keys"] = pubkey
- else:
- self.module.warn(
- "'pubkey' is not supported for PVE 4.1 and below. Ignoring keyword."
- )
-
- if kwargs.get("ostype") == "auto":
- kwargs.pop("ostype")
-
- proxmox_node = self.proxmox_api.nodes(node)
- taskid = getattr(proxmox_node, self.VZ_TYPE).create(
- vmid=vmid, ostemplate=ostemplate, **kwargs
- )
- self.handle_api_timeout(
- vmid,
- node,
- taskid,
- timeout,
- "Reached timeout while waiting for creation of VM %s from template %s"
- % (vmid, ostemplate),
- )
-
- def clone_lxc_instance(self, vmid, node, clone_from, clone_type, timeout, **kwargs):
- if self.VZ_TYPE != "lxc":
- self.module.fail_json(
- msg="Cloning is only supported for LXC-enabled clusters in PVE 4.0 and above."
- )
-
- # Remove empty values from kwargs
- kwargs = {k: v for k, v in kwargs.items() if v is not None}
-
- target_is_template = self.is_template_container(node, clone_from)
- # By default, create a full copy only when the cloned container is not a template.
- create_full_copy = not target_is_template
-
- # Only accept parameters that are compatible with the clone endpoint.
- valid_clone_parameters = ["hostname", "pool", "description"]
-
- if "storage" not in kwargs and target_is_template:
- # Cloning a template, so create a full copy instead of a linked copy
- create_full_copy = True
- elif "storage" not in kwargs and not target_is_template:
- self.module.fail_json(
- changed=False,
- msg="Clone target container is not a template, storage needs to be specified.",
- )
-
- if clone_type == "linked" and not target_is_template:
- self.module.fail_json(
- changed=False,
- msg="Cloning type 'linked' is only supported for template containers.",
- )
- elif clone_type == "opportunistic" and not target_is_template:
- # Cloned container is not a template, so we need our 'storage' parameter
- valid_clone_parameters.append("storage")
- elif clone_type == "full":
- create_full_copy = True
- valid_clone_parameters.append("storage")
-
- clone_parameters = {}
- clone_parameters["full"] = ansible_to_proxmox_bool(create_full_copy)
-
- for param in valid_clone_parameters:
- if param in kwargs:
- clone_parameters[param] = kwargs[param]
-
- proxmox_node = self.proxmox_api.nodes(node)
- taskid = getattr(proxmox_node, self.VZ_TYPE)(clone_from).clone.post(
- newid=vmid, **clone_parameters
- )
- self.handle_api_timeout(
- vmid,
- node,
- taskid,
- timeout,
- timeout_msg="Reached timeout while waiting for VM to clone.",
- )
-
- def start_lxc_instance(self, vmid, node, timeout):
- proxmox_node = self.proxmox_api.nodes(node)
- taskid = getattr(proxmox_node, self.VZ_TYPE)(vmid).status.start.post()
-
- self.handle_api_timeout(
- vmid,
- node,
- taskid,
- timeout,
- timeout_msg="Reached timeout while waiting for VM to start.",
- )
-
- def stop_lxc_instance(self, vmid, node, timeout, force):
- stop_params = {}
- if force:
- stop_params["forceStop"] = 1
-
- proxmox_node = self.proxmox_api.nodes(node)
- taskid = getattr(proxmox_node, self.VZ_TYPE)(vmid).status.shutdown.post(
- **stop_params
- )
-
- self.handle_api_timeout(
- vmid,
- node,
- taskid,
- timeout,
- timeout_msg="Reached timeout while waiting for VM to stop.",
- )
-
- def umount_lxc_instance(self, vmid, node, timeout):
- proxmox_node = self.proxmox_api.nodes(node)
- taskid = getattr(proxmox_node, self.VZ_TYPE)(vmid).status.unmount.post()
-
- self.handle_api_timeout(
- vmid,
- node,
- taskid,
- timeout,
- timeout_msg="Reached timeout while waiting for VM to be unmounted.",
- )
-
- def remove_lxc_instance(self, vmid, node, timeout, purge):
- delete_params = {}
- if purge:
- delete_params["purge"] = 1
-
- proxmox_node = self.proxmox_api.nodes(node)
- taskid = getattr(proxmox_node, self.VZ_TYPE).delete(vmid, **delete_params)
-
- self.handle_api_timeout(
- vmid,
- node,
- taskid,
- timeout,
- timeout_msg="Reached timeout while waiting for VM to be removed.",
- )
-
- def process_disk_keys(self, vmid, node, disk, disk_volume):
- """
- Process disk keys and return a formatted disk volume with the `rootfs` key.
-
- Args:
- vmid (int): VM identifier.
- node (str): Node identifier.
- disk (str, optional): Disk key in the format 'storage:volume'. Defaults to None.
- disk_volume (Dict[str, Any], optional): Disk volume data. Defaults to None.
-
- Returns:
- Dict[str, str]: Formatted disk volume with the `rootfs` or `disk` key (depending on the `VZ_TYPE`), or an empty dict if no disk volume is specified.
- """
- if disk is None and disk_volume is None:
- return {}
-
- disk_dict = {}
-
- if disk is not None:
- if disk.isdigit():
- disk_dict["rootfs"] = disk
- else:
- disk_volume = self.parse_disk_string(disk)
-
- if disk_volume is not None:
- disk_dict = self.build_volume(vmid, node, key="rootfs", **disk_volume)
-
- if self.VZ_TYPE != "lxc":
- disk_dict["disk"] = disk_dict.pop("rootfs")
-
- return disk_dict
-
- def process_mount_keys(self, vmid, node, mounts, mount_volumes):
- """
- Process mount keys and return a formatted mount volumes with the `mp[n]` keys.
-
- Args:
- vmid (str): VM identifier.
- node (str): Node identifier.
- mounts (str, optional): Mount key in the format 'pool:volume'. Defaults to None.
- mount_volumes (Dict[str, Any], optional): Mount volume data. Defaults to None.
-
- Returns:
- Dict[str, str]: Formatted mount volumes with the `mp[n]` keys, or an empty dict if no mount volumes are specified.
- """
- if mounts is not None:
- mount_volumes = []
- for mount_key, mount_string in mounts.items():
- mount_config = self.parse_disk_string(mount_string)
- mount_volumes.append(dict(id=mount_key, **mount_config))
- elif mount_volumes is None or mount_volumes == []:
- return {}
-
- mounts_dict = {}
- for mount_config in mount_volumes:
- mount_key = mount_config.pop("id")
- mount_dict = self.build_volume(vmid, node, key=mount_key, **mount_config)
- mounts_dict.update(mount_dict)
-
- return mounts_dict
-
- def parse_disk_string(self, disk_string):
- """
- Parse a disk string and return a dictionary with the disk details.
-
- Args:
- disk_string (str): Disk string.
-
- Returns:
- Dict[str, Any]: Disk details.
-
- Note: Below are some example disk strings that this function MUST be able to parse:
- "acl=0,thin1:base-100-disk-1,size=8G"
- "thin1:10,backup=0"
- "local:20"
- "local-lvm:0.50"
- "tmp-dir:300/subvol-300-disk-0.subvol,acl=1,size=0T"
- "tmplog-dir:300/vm-300-disk-0.raw,mp=/var/log,mountoptions=noatime,size=32M"
- "volume=local-lvm:base-100-disk-1,size=20G"
- "/mnt/bindmounts/shared,mp=/shared"
- "volume=/dev/USB01,mp=/mnt/usb01"
- """
- args = disk_string.split(",")
- # If the volume is not explicitly defined but implicit by only passing a key,
- # add the "volume=" key prefix for ease of parsing.
- args = ["volume=" + arg if "=" not in arg else arg for arg in args]
- # Then create a dictionary from the arguments
- disk_kwargs = dict(map(lambda item: item.split("="), args))
-
- VOLUME_PATTERN = r"""(?x)
- ^
- (?:
- (?:
- (?P[\w\-.]+):
- (?:
- (?P\d+\.?\d*)|
- (?P[^,\s]+)
- )
- )|
- (?P[^,\s]+)
- )
- $
- """
- # DISCLAIMER:
- # There are two things called a "volume":
- # 1. The "volume" key which describes the storage volume, device or directory to mount into the container.
- # 2. The storage volume of a storage-backed mount point in the PVE storage sub system.
- # In this section, we parse the "volume" key and check which type of mount point we are dealing with.
- pattern = re.compile(VOLUME_PATTERN)
- volume_string = disk_kwargs.pop("volume")
- match = pattern.match(volume_string)
- if match is None:
- raise ValueError(("Invalid volume string: %s", volume_string))
- match_dict = match.groupdict()
- match_dict = {k: v for k, v in match_dict.items() if v is not None}
-
- if "storage" in match_dict and "volume" in match_dict:
- disk_kwargs["storage"] = match_dict["storage"]
- disk_kwargs["volume"] = match_dict["volume"]
- elif "storage" in match_dict and "size" in match_dict:
- disk_kwargs["storage"] = match_dict["storage"]
- disk_kwargs["size"] = match_dict["size"]
- elif "host_path" in match_dict:
- disk_kwargs["host_path"] = match_dict["host_path"]
-
- # Pattern matching only available in Python 3.10+
- # TODO: Uncomment the following code once only Python 3.10+ is supported
- # match match_dict:
- # case {"storage": storage, "volume": volume}:
- # disk_kwargs["storage"] = storage
- # disk_kwargs["volume"] = volume
-
- # case {"storage": storage, "size": size}:
- # disk_kwargs["storage"] = storage
- # disk_kwargs["size"] = size
-
- # case {"host_path": host_path}:
- # disk_kwargs["host_path"] = host_path
-
- return disk_kwargs
-
- def build_volume(self, vmid, node, key, storage=None, volume=None, host_path=None, size=None, mountpoint=None, options=None, **kwargs):
- """
- Build a volume string for the specified VM.
-
- Args:
- vmid (str): The VM ID.
- node (str): The node where the VM resides.
- key (str): The key for the volume in the VM's config.
- storage (str, optional): The storage pool where the volume resides. Defaults to None.
- volume (str, optional): The name of the volume. Defaults to None.
- host_path (str, optional): The host path to mount. Defaults to None.
- size (str | int, optional): The size of the volume in GiB. Defaults to None.
- mountpoint (str, optional): The mountpoint for the volume. Defaults to None.
- options (Dict[str, Any], optional): Additional options for the volume. Defaults to None.
- **kwargs: Additional keyword arguments.
-
- Returns:
- Dict[str, str]: The built volume string in the format {'volume_key': 'volume_string'}.
-
- Note: Further documentation can be found in the proxmox-api documentation: https://pve.proxmox.com/wiki/Linux_Container#pct_mount_points
- Note: To build a valid volume string, we need ONE of the following:
- A volume name, storage name, and size
- Only a storage name and size (to create a new volume or assign the volume automatically)
- A host directory to mount into the container
- """
- if isinstance(size, int):
- size = str(size)
- if size is not None and isfloat(size):
- size += "G" # default to GiB
- # Handle volume checks/creation
- # TODO: Change the code below to pattern matching once only Python 3.10+ is supported
- # 1. Check if defined volume exists
- if volume is not None:
- storage_content = self.get_storage_content(node, storage, vmid=vmid)
- vol_ids = [vol["volid"] for vol in storage_content]
- volid = "{storage}:{volume}".format(storage=storage, volume=volume)
- if volid not in vol_ids:
- self.module.fail_json(
- changed=False,
- msg="Storage {storage} does not contain volume {volume}".format(
- storage=storage,
- volume=volume,
- ),
- )
- vol_string = "{storage}:{volume},size={size}".format(
- storage=storage, volume=volume, size=size
- )
- # 2. If volume not defined (but storage is), check if it exists
- elif storage is not None:
- proxmox_node = self.proxmox_api.nodes(
- node
- ) # The node must exist, but not the LXC
- try:
- vol = proxmox_node.lxc(vmid).get("config").get(key)
- volume = self.parse_disk_string(vol).get("volume")
- vol_string = "{storage}:{volume},size={size}".format(
- storage=storage, volume=volume, size=size
- )
-
- # If not, we have proxmox create one using the special syntax
- except Exception:
- if size is None:
- raise ValueError(
- "Size must be provided for storage-backed volume creation."
- )
- elif size.endswith("G"):
- size = size.rstrip("G")
- vol_string = "{storage}:{size}".format(storage=storage, size=size)
- else:
- raise ValueError(
- "Size must be provided in GiB for storage-backed volume creation. Convert it to GiB or allocate a new storage manually."
- )
- # 3. If we have a host_path, we don't have storage, a volume, or a size
- # Then we don't have to do anything, just build and return the vol_string
- elif host_path is not None:
- vol_string = ""
- else:
- raise ValueError(
- "Could not build a valid volume string. One of volume, storage, or host_path must be provided."
- )
-
- if host_path is not None:
- vol_string += "," + host_path
-
- if mountpoint is not None:
- vol_string += ",mp={}".format(mountpoint)
-
- if options is not None:
- vol_string += "," + ",".join(
- ["{0}={1}".format(k, v) for k, v in options.items()]
- )
-
- if kwargs:
- vol_string += "," + ",".join(
- ["{0}={1}".format(k, v) for k, v in kwargs.items()]
- )
- return {key: vol_string}
-
- def get_lxc_resource(self, vmid, hostname):
- if not vmid and not hostname:
- self.module.fail_json(msg="Either VMID or hostname must be provided.")
-
- if vmid:
- vm = self.get_lxc_resource_by_id(vmid)
- elif hostname:
- vm = self.get_lxc_resource_by_hostname(hostname)
-
- vmid = vm["vmid"]
- if vm["type"] != self.VZ_TYPE:
- identifier = self.format_vm_identifier(vmid, hostname)
- self.module.fail_json(
- msg="The specified VM %s is not an %s." % (identifier, self.VZ_TYPE)
- )
-
- return vm
-
- def get_lxc_resource_by_id(self, vmid):
- vms = self.get_vm_resources()
-
- vms = [vm for vm in vms if vm["vmid"] == vmid]
- if len(vms) == 0:
- raise LookupError("VM with VMID %d does not exist in cluster." % vmid)
-
- return vms[0]
-
- def get_lxc_resource_by_hostname(self, hostname):
- vms = self.get_vm_resources()
-
- vms = [vm for vm in vms if vm["name"] == hostname]
- if len(vms) == 0:
- raise LookupError(
- "VM with hostname %s does not exist in cluster." % hostname
- )
- elif len(vms) > 1:
- raise ValueError(
- "Multiple VMs found with hostname %s. Please specify VMID." % hostname
- )
-
- return vms[0]
-
- def get_vm_resources(self):
- try:
- return self.proxmox_api.cluster.resources.get(type="vm")
- except Exception as e:
- self.module.fail_json(
- msg="Unable to retrieve list of %s VMs from cluster resources: %s"
- % (self.VZ_TYPE, e)
- )
-
- def get_lxc_status(self, vmid, node_name):
- try:
- proxmox_node = self.proxmox_api.nodes(node_name)
- except Exception as e:
- self.module.fail_json(msg="Unable to retrieve node information: %s" % e)
- return getattr(proxmox_node, self.VZ_TYPE)(vmid).status.current.get()['status']
-
- def format_vm_identifier(self, vmid, hostname):
- if vmid and hostname:
- return "%s (%s)" % (hostname, vmid)
- elif hostname:
- return hostname
- else:
- return to_native(vmid)
-
- def handle_api_timeout(self, vmid, node, taskid, timeout, timeout_msg=""):
- if timeout_msg != "":
- timeout_msg = "%s " % timeout_msg
-
- while timeout > 0:
- if self.api_task_ok(node, taskid):
- return
- timeout -= 1
- time.sleep(1)
-
- self.module.fail_json(
- vmid=vmid,
- taskid=taskid,
- msg="%sLast line in task before timeout: %s"
- % (timeout_msg, self.proxmox_api.nodes(node).tasks(taskid).log.get()[:1]),
- )
-
- def is_template_container(self, node, target):
- """Check if the specified container is a template."""
- proxmox_node = self.proxmox_api.nodes(node)
- config = getattr(proxmox_node, self.VZ_TYPE)(target).config.get()
- return config.get("template", False)
-
- def content_check(self, node, ostemplate, template_store):
- """Check if the specified ostemplate is present in the specified storage."""
- proxmox_node = self.proxmox_api.nodes(node)
- storage_contents = proxmox_node.storage(template_store).content.get()
- return any(content["volid"] == ostemplate for content in storage_contents)
-
- def validate_tags(self, tags):
- """Check if the specified tags are valid."""
- re_tag = re.compile(r"^[a-zA-Z0-9_][a-zA-Z0-9_\-\+\.]*$")
- for tag in tags:
- if not re_tag.match(tag):
- self.module.fail_json(msg="%s is not a valid tag" % tag)
- return False
- return True
-
- def check_supported_features(self):
- for option, version in self.MINIMUM_VERSIONS.items():
- if self.version() < LooseVersion(version) and option in self.module.params:
- self.module.fail_json(
- changed=False,
- msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_version}".format(
- option=option, version=version, pve_version=self.version()
- ),
- )
-
-
-def isfloat(value):
- if value is None:
- return False
- try:
- float(value)
- return True
- except ValueError:
- return False
-
-
-def main():
- module = get_ansible_module()
- proxmox = ProxmoxLxcAnsible(module)
-
- try:
- proxmox.run()
- except Exception as e:
- module.fail_json(msg="An error occurred: %s" % to_native(e))
-
-
-if __name__ == "__main__":
- main()
diff --git a/plugins/modules/proxmox_backup.py b/plugins/modules/proxmox_backup.py
deleted file mode 100644
index 63e19c1d35..0000000000
--- a/plugins/modules/proxmox_backup.py
+++ /dev/null
@@ -1,570 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2024, IamLunchbox
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: proxmox_backup
-author: "Raphael Grieger (@IamLunchbox) "
-short_description: Start a VM backup in Proxmox VE cluster
-version_added: 10.1.0
-description:
- - Allows you to create backups of KVM and LXC guests in Proxmox VE cluster.
- - Offers the GUI functionality of creating a single backup as well as using the run-now functionality from the cluster backup
- schedule.
- - The mininum required privileges to use this module are C(VM.Backup) and C(Datastore.AllocateSpace) for the respective
- VMs and storage.
- - Most options are optional and if unspecified will be chosen by the Cluster and its default values.
- - Note that this module B(is not idempotent). It always starts a new backup (when not in check mode).
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
-options:
- backup_mode:
- description:
- - The mode how Proxmox performs backups. The default is, to create a runtime snapshot including memory.
- - Check U(https://pve.proxmox.com/pve-docs/chapter-vzdump.html#_backup_modes) for an explanation of the differences.
- type: str
- choices: ["snapshot", "suspend", "stop"]
- default: snapshot
- bandwidth:
- description:
- - Limit the I/O bandwidth (in KiB/s) to write backup. V(0) is unlimited.
- type: int
- change_detection_mode:
- description:
- - Set the change detection mode (available from Proxmox VE 8.3).
- - It is only used when backing up containers, Proxmox silently ignores this option when applied to kvm guests.
- type: str
- choices: ["legacy", "data", "metadata"]
- compress:
- description:
- - Enable additional compression of the backup archive.
- - V(0) will use the Proxmox recommended value, depending on your storage target.
- type: str
- choices: ["0", "1", "gzip", "lzo", "zstd"]
- compression_threads:
- description:
- - The number of threads zstd will use to compress the backup.
- - V(0) uses 50% of the available cores, anything larger than V(0) will use exactly as many threads.
- - Is ignored if you specify O(compress=gzip) or O(compress=lzo).
- type: int
- description:
- description:
- - Specify the description of the backup.
- - Needs to be a single line, newline and backslash need to be escaped as V(\\n) and V(\\\\) respectively.
- - If you need variable interpolation, you can set the content as usual through ansible jinja templating and/or let Proxmox
- substitute templates.
- - Proxmox currently supports V({{cluster}}), V({{guestname}}), V({{node}}), and V({{vmid}}) as templating variables.
- Since this is also a jinja delimiter, you need to set these values as raw jinja.
- default: "{{guestname}}"
- type: str
- fleecing:
- description:
- - Enable backup fleecing. Works only for virtual machines and their disks.
- - Must be entered as a string, containing key-value pairs in a list.
- type: str
- mode:
- description:
- - Specifices the mode to select backup targets.
- choices: ["include", "all", "pool"]
- required: true
- type: str
- node:
- description:
- - Only execute the backup job for the given node.
- - This option is usually used if O(mode=all).
- - If you specify a node ID and your vmids or pool do not reside there, they will not be backed up!
- type: str
- notification_mode:
- description:
- - Determine which notification system to use.
- type: str
- choices: ["auto", "legacy-sendmail", "notification-system"]
- default: auto
- performance_tweaks:
- description:
- - Enable other performance-related settings.
- - Must be entered as a string, containing comma separated key-value pairs.
- - 'For example: V(max-workers=2,pbs-entries-max=2).'
- type: str
- pool:
- description:
- - Specify a pool name to limit backups to guests to the given pool.
- - Required, when O(mode=pool).
- - Also required, when your user only has VM.Backup permission for this single pool.
- type: str
- protected:
- description:
- - Marks backups as protected.
- - '"Might fail, when the PBS backend has verify enabled due to this bug: U(https://bugzilla.proxmox.com/show_bug.cgi?id=4289)".'
- type: bool
- retention:
- description:
- - Use custom retention options instead of those from the default cluster configuration (which is usually V("keep-all=1")).
- - Always requires Datastore.Allocate permission at the storage endpoint.
- - Specifying a retention time other than V(keep-all=1) might trigger pruning on the datastore, if an existing backup
- should be deleted due to your specified timeframe.
- - Deleting requires C(Datastore.Modify) or C(Datastore.Prune) permissions on the backup storage.
- type: str
- storage:
- description:
- - Store the backup archive on this storage.
- type: str
- required: true
- vmids:
- description:
- - The instance IDs to be backed up.
- - Only valid, if O(mode=include).
- type: list
- elements: int
- wait:
- description:
- - Wait for the backup to be finished.
- - Fails, if job does not succeed successfully within the given timeout.
- type: bool
- default: false
- wait_timeout:
- description:
- - Seconds to wait for the backup to be finished.
- - Will only be evaluated, if O(wait=true).
- type: int
- default: 10
-requirements: ["proxmoxer", "requests"]
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
-"""
-
-EXAMPLES = r"""
-- name: Backup all vms in the Proxmox cluster to storage mypbs
- community.general.proxmox_backup:
- api_user: root@pam
- api_password: secret
- api_host: node1
- storage: mypbs
- mode: all
-
-- name: Backup VMID 100 by stopping it and set an individual retention
- community.general.proxmox_backup:
- api_user: root@pam
- api_password: secret
- api_host: node1
- backup-mode: stop
- mode: include
- retention: keep-daily=5, keep-last=14, keep-monthly=4, keep-weekly=4, keep-yearly=0
- storage: mypbs
- vmid: [100]
-
-- name: Backup all vms on node node2 to storage mypbs and wait for the task to finish
- community.general.proxmox_backup:
- api_user: test@pve
- api_password: 1q2w3e
- api_host: node2
- storage: mypbs
- mode: all
- node: node2
- wait: true
- wait_timeout: 30
-
-- name: Use all the options
- community.general.proxmox_backup:
- api_user: root@pam
- api_password: secret
- api_host: node1
- bandwidth: 1000
- backup_mode: suspend
- compress: zstd
- compression_threads: 0
- description: A single backup for {% raw %}{{ guestname }}{% endraw %}
- mode: include
- notification_mode: notification-system
- protected: true
- retention: keep-monthly=1, keep-weekly=1
- storage: mypbs
- vmids:
- - 100
- - 101
-"""
-
-RETURN = r"""
-backups:
- description: List of nodes and their task IDs.
- returned: on success
- type: list
- elements: dict
- contains:
- node:
- description: Node ID.
- returned: on success
- type: str
- status:
- description: Last known task status. Will be unknown, if O(wait=false).
- returned: on success
- type: str
- choices: ["unknown", "success", "failed"]
- upid:
- description: >-
- Proxmox cluster UPID, which is needed to lookup task info. Returns OK, when a cluster node did not create a task after
- being called, for example due to no matching targets.
- returned: on success
- type: str
-"""
-
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.general.plugins.module_utils.proxmox import ProxmoxAnsible, proxmox_auth_argument_spec
-
-
-def has_permission(permission_tree, permission, search_scopes, default=0, expected=1):
- return any(permission_tree.get(scope, {}).get(permission, default) == expected for scope in search_scopes)
-
-
-class ProxmoxBackupAnsible(ProxmoxAnsible):
-
- def _get_permissions(self):
- return self.proxmox_api.access.permissions.get()
-
- def _get_resources(self, resource_type=None):
- return self.proxmox_api.cluster.resources.get(type=resource_type)
-
- def _get_tasklog(self, node, upid):
- return self.proxmox_api.nodes(node).tasks(upid).log.get()
-
- def _get_taskok(self, node, upid):
- return self.proxmox_api.nodes(node).tasks(upid).status.get()
-
- def _post_vzdump(self, node, request_body):
- return self.proxmox_api.nodes(node).vzdump.post(**request_body)
-
- def request_backup(
- self,
- request_body,
- node_endpoints):
- task_ids = []
-
- for node in node_endpoints:
- upid = self._post_vzdump(node, request_body)
- if upid != "OK":
- tasklog = ", ".join(logentry["t"] for logentry in self._get_tasklog(node, upid))
- else:
- tasklog = ""
- task_ids.extend([{"node": node, "upid": upid, "status": "unknown", "log": "%s" % tasklog}])
- return task_ids
-
- def check_relevant_nodes(self, node):
- nodes = [
- item["node"]
- for item in self._get_resources("node")
- if item["status"] == "online"
- ]
- if node and node not in nodes:
- self.module.fail_json(msg="Node %s was specified, but does not exist on the cluster" % node)
- elif node:
- return [node]
- return nodes
-
- def check_storage_permissions(
- self,
- permissions,
- storage,
- bandwidth,
- performance,
- retention):
- # Check for Datastore.AllocateSpace in the permission tree
- if not has_permission(permissions, "Datastore.AllocateSpace", search_scopes=["/", "/storage/", "/storage/" + storage]):
- self.module.fail_json(changed=False, msg="Insufficient permission: Datastore.AllocateSpace is missing")
-
- if (bandwidth or performance) and has_permission(permissions, "Sys.Modify", search_scopes=["/"], expected=0):
- self.module.fail_json(changed=False, msg="Insufficient permission: Performance_tweaks and bandwidth require 'Sys.Modify' permission for '/'")
-
- if retention:
- if not has_permission(permissions, "Datastore.Allocate", search_scopes=["/", "/storage", "/storage/" + storage]):
- self.module.fail_json(changed=False, msg="Insufficient permissions: Custom retention was requested, but Datastore.Allocate is missing")
-
- def check_vmid_backup_permission(self, permissions, vmids, pool):
- sufficient_permissions = has_permission(permissions, "VM.Backup", search_scopes=["/", "/vms"])
- if pool and not sufficient_permissions:
- sufficient_permissions = has_permission(permissions, "VM.Backup", search_scopes=["/pool/" + pool, "/pool/" + pool + "/vms"])
-
- if not sufficient_permissions:
- # Since VM.Backup can be given for each vmid at a time, iterate through all of them
- # and check, if the permission is set
- failed_vmids = []
- for vm in vmids:
- vm_path = "/vms/" + str(vm)
- if has_permission(permissions, "VM.Backup", search_scopes=[vm_path], default=1, expected=0):
- failed_vmids.append(str(vm))
- if failed_vmids:
- self.module.fail_json(
- changed=False, msg="Insufficient permissions: "
- "You dont have the VM.Backup permission for VMID %s" %
- ", ".join(failed_vmids))
- sufficient_permissions = True
- # Finally, when no check succeeded, fail
- if not sufficient_permissions:
- self.module.fail_json(changed=False, msg="Insufficient permissions: You do not have the VM.Backup permission")
-
- def check_general_backup_permission(self, permissions, pool):
- if not has_permission(permissions, "VM.Backup", search_scopes=["/", "/vms"] + (["/pool/" + pool] if pool else [])):
- self.module.fail_json(changed=False, msg="Insufficient permissions: You dont have the VM.Backup permission")
-
- def check_if_storage_exists(self, storage, node):
- storages = self.get_storages(type=None)
- # Loop through all cluster storages and get all matching storages
- validated_storagepath = [storageentry for storageentry in storages if storageentry["storage"] == storage]
- if not validated_storagepath:
- self.module.fail_json(
- changed=False,
- msg="Storage %s does not exist in the cluster" %
- storage)
-
- def check_vmids(self, vmids):
- cluster_vmids = [vm["vmid"] for vm in self._get_resources("vm")]
- if not cluster_vmids:
- self.module.warn(
- "VM.Audit permission is missing or there are no VMs. This task might fail if one VMID does not exist")
- return
- vmids_not_found = [str(vm) for vm in vmids if vm not in cluster_vmids]
- if vmids_not_found:
- self.module.warn(
- "VMIDs %s not found. This task will fail if one VMID does not exist" %
- ", ".join(vmids_not_found))
-
- def wait_for_timeout(self, timeout, raw_tasks):
-
- # filter all entries, which did not get a task id from the Cluster
- tasks = []
- ok_tasks = []
- for node in raw_tasks:
- if node["upid"] != "OK":
- tasks.append(node)
- else:
- ok_tasks.append(node)
-
- start_time = time.time()
- # iterate through the task ids and check their values
- while True:
- for node in tasks:
- if node["status"] == "unknown":
- try:
- # proxmox.api_task_ok does not suffice, since it only
- # is true at `stopped` and `ok`
- status = self._get_taskok(node["node"], node["upid"])
- if status["status"] == "stopped" and status["exitstatus"] == "OK":
- node["status"] = "success"
- if status["status"] == "stopped" and status["exitstatus"] == "job errors":
- node["status"] = "failed"
- except Exception as e:
- self.module.fail_json(msg="Unable to retrieve API task ID from node %s: %s" % (node["node"], e))
- if len([item for item in tasks if item["status"] != "unknown"]) == len(tasks):
- break
- if time.time() > start_time + timeout:
- timeouted_nodes = [
- node["node"]
- for node in tasks
- if node["status"] == "unknown"
- ]
- failed_nodes = [node["node"] for node in tasks if node["status"] == "failed"]
- if failed_nodes:
- self.module.fail_json(
- msg="Reached timeout while waiting for backup task. "
- "Nodes, who reached the timeout: %s. "
- "Nodes, which failed: %s" %
- (", ".join(timeouted_nodes), ", ".join(failed_nodes)))
- self.module.fail_json(
- msg="Reached timeout while waiting for creating VM snapshot. "
- "Nodes who reached the timeout: %s" %
- ", ".join(timeouted_nodes))
- time.sleep(1)
-
- error_logs = []
- for node in tasks:
- if node["status"] == "failed":
- tasklog = ", ".join([logentry["t"] for logentry in self._get_tasklog(node["node"], node["upid"])])
- error_logs.append("%s: %s" % (node, tasklog))
- if error_logs:
- self.module.fail_json(
- msg="An error occured creating the backups. "
- "These are the last log lines from the failed nodes: %s" %
- ", ".join(error_logs))
-
- for node in tasks:
- tasklog = ", ".join([logentry["t"] for logentry in self._get_tasklog(node["node"], node["upid"])])
- node["log"] = tasklog
-
- # Finally, reattach ok tasks to show, that all nodes were contacted
- tasks.extend(ok_tasks)
- return tasks
-
- def permission_check(
- self,
- storage,
- mode,
- node,
- bandwidth,
- performance_tweaks,
- retention,
- pool,
- vmids):
- permissions = self._get_permissions()
- self.check_if_storage_exists(storage, node)
- self.check_storage_permissions(
- permissions, storage, bandwidth, performance_tweaks, retention)
- if mode == "include":
- self.check_vmid_backup_permission(permissions, vmids, pool)
- else:
- self.check_general_backup_permission(permissions, pool)
-
- def prepare_request_parameters(self, module_arguments):
- # ensure only valid post parameters are passed to proxmox
- # list of dict items to replace with (new_val, old_val)
- post_params = [("bwlimit", "bandwidth"),
- ("compress", "compress"),
- ("fleecing", "fleecing"),
- ("mode", "backup_mode"),
- ("notes-template", "description"),
- ("notification-mode", "notification_mode"),
- ("pbs-change-detection-mode", "change_detection_mode"),
- ("performance", "performance_tweaks"),
- ("pool", "pool"),
- ("protected", "protected"),
- ("prune-backups", "retention"),
- ("storage", "storage"),
- ("zstd", "compression_threads"),
- ("vmid", "vmids")]
- request_body = {}
- for new, old in post_params:
- if module_arguments.get(old):
- request_body.update({new: module_arguments[old]})
-
- # Set mode specific values
- if module_arguments["mode"] == "include":
- request_body.pop("pool", None)
- request_body["all"] = 0
- elif module_arguments["mode"] == "all":
- request_body.pop("vmid", None)
- request_body.pop("pool", None)
- request_body["all"] = 1
- elif module_arguments["mode"] == "pool":
- request_body.pop("vmid", None)
- request_body["all"] = 0
-
- # Create comma separated list from vmids, the API expects so
- if request_body.get("vmid"):
- request_body.update({"vmid": ",".join(str(vmid) for vmid in request_body["vmid"])})
-
- # remove whitespaces from option strings
- for key in ("prune-backups", "performance"):
- if request_body.get(key):
- request_body[key] = request_body[key].replace(" ", "")
- # convert booleans to 0/1
- for key in ("protected",):
- if request_body.get(key):
- request_body[key] = 1
- return request_body
-
- def backup_create(
- self,
- module_arguments,
- check_mode,
- node_endpoints):
- request_body = self.prepare_request_parameters(module_arguments)
- # stop here, before anything gets changed
- if check_mode:
- return []
-
- task_ids = self.request_backup(request_body, node_endpoints)
- updated_task_ids = []
- if module_arguments["wait"]:
- updated_task_ids = self.wait_for_timeout(
- module_arguments["wait_timeout"], task_ids)
- return updated_task_ids if updated_task_ids else task_ids
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- backup_args = {
- "backup_mode": {"type": "str", "default": "snapshot", "choices": ["snapshot", "suspend", "stop"]},
- "bandwidth": {"type": "int"},
- "change_detection_mode": {"type": "str", "choices": ["legacy", "data", "metadata"]},
- "compress": {"type": "str", "choices": ["0", "1", "gzip", "lzo", "zstd"]},
- "compression_threads": {"type": "int"},
- "description": {"type": "str", "default": "{{guestname}}"},
- "fleecing": {"type": "str"},
- "mode": {"type": "str", "required": True, "choices": ["include", "all", "pool"]},
- "node": {"type": "str"},
- "notification_mode": {"type": "str", "default": "auto", "choices": ["auto", "legacy-sendmail", "notification-system"]},
- "performance_tweaks": {"type": "str"},
- "pool": {"type": "str"},
- "protected": {"type": "bool"},
- "retention": {"type": "str"},
- "storage": {"type": "str", "required": True},
- "vmids": {"type": "list", "elements": "int"},
- "wait": {"type": "bool", "default": False},
- "wait_timeout": {"type": "int", "default": 10}}
- module_args.update(backup_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- supports_check_mode=True,
- required_if=[
- ("mode", "include", ("vmids",), True),
- ("mode", "pool", ("pool",))
- ]
- )
- proxmox = ProxmoxBackupAnsible(module)
- bandwidth = module.params["bandwidth"]
- mode = module.params["mode"]
- node = module.params["node"]
- performance_tweaks = module.params["performance_tweaks"]
- pool = module.params["pool"]
- retention = module.params["retention"]
- storage = module.params["storage"]
- vmids = module.params["vmids"]
-
- proxmox.permission_check(
- storage,
- mode,
- node,
- bandwidth,
- performance_tweaks,
- retention,
- pool,
- vmids)
- if module.params["mode"] == "include":
- proxmox.check_vmids(module.params["vmids"])
- node_endpoints = proxmox.check_relevant_nodes(module.params["node"])
- try:
- result = proxmox.backup_create(module.params, module.check_mode, node_endpoints)
- except Exception as e:
- module.fail_json(msg="Creating backups failed with exception: %s" % to_native(e))
-
- if module.check_mode:
- module.exit_json(backups=result, changed=True, msg="Backups would be created")
-
- elif len([entry for entry in result if entry["upid"] == "OK"]) == len(result):
- module.exit_json(backups=result, changed=False, msg="Backup request sent to proxmox, no tasks created")
-
- elif module.params["wait"]:
- module.exit_json(backups=result, changed=True, msg="Backups succeeded")
-
- else:
- module.exit_json(backups=result, changed=True,
- msg="Backup tasks created")
-
-
-if __name__ == "__main__":
- main()
diff --git a/plugins/modules/proxmox_backup_info.py b/plugins/modules/proxmox_backup_info.py
deleted file mode 100644
index 0889239b37..0000000000
--- a/plugins/modules/proxmox_backup_info.py
+++ /dev/null
@@ -1,244 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2024 Marzieh Raoufnezhad
-# Copyright (c) 2024 Maryam Mayabi
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = """
----
-module: proxmox_backup_info
-
-short_description: Retrieve information on Proxmox scheduled backups
-
-version_added: 10.3.0
-
-description:
- - Retrieve information such as backup times, VM name, VM ID, mode, backup type, and backup schedule using the Proxmox Server API.
-
-author:
- - "Marzieh Raoufnezhad (@raoufnezhad) "
- - "Maryam Mayabi (@mmayabi) "
-
-options:
- vm_name:
- description:
- - The name of the Proxmox VM.
- - If defined, the returned list will contain backup jobs that have been parsed and filtered based on O(vm_name) value.
- - Mutually exclusive with O(vm_id) and O(backup_jobs).
- type: str
- vm_id:
- description:
- - The ID of the Proxmox VM.
- - If defined, the returned list will contain backup jobs that have been parsed and filtered based on O(vm_id) value.
- - Mutually exclusive with O(vm_name) and O(backup_jobs).
- type: str
- backup_jobs:
- description:
- - If V(true), the module will return all backup jobs information.
- - If V(false), the module will parse all backup jobs based on VM IDs and return a list of VMs' backup information.
- - Mutually exclusive with O(vm_id) and O(vm_name).
- default: false
- type: bool
-
-extends_documentation_fragment:
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
- - community.general.proxmox.actiongroup_proxmox
-"""
-
-EXAMPLES = """
-- name: Print all backup information by VM ID and VM name
- community.general.proxmox_backup_info:
- api_user: 'myUser@pam'
- api_password: '*******'
- api_host: '192.168.20.20'
-
-- name: Print Proxmox backup information for a specific VM based on its name
- community.general.proxmox_backup_info:
- api_user: 'myUser@pam'
- api_password: '*******'
- api_host: '192.168.20.20'
- vm_name: 'mailsrv'
-
-- name: Print Proxmox backup information for a specific VM based on its VM ID
- community.general.proxmox_backup_info:
- api_user: 'myUser@pam'
- api_password: '*******'
- api_host: '192.168.20.20'
- vm_id: '150'
-
-- name: Print Proxmox all backup job information
- community.general.proxmox_backup_info:
- api_user: 'myUser@pam'
- api_password: '*******'
- api_host: '192.168.20.20'
- backup_jobs: true
-"""
-
-RETURN = """
----
-backup_info:
- description: The return value provides backup job information based on VM ID or VM name, or total backup job information.
- returned: on success, but can be empty
- type: list
- elements: dict
- contains:
- bktype:
- description: The type of the backup.
- returned: on success
- type: str
- sample: vzdump
- enabled:
- description: V(1) if backup is enabled else V(0).
- returned: on success
- type: int
- sample: 1
- id:
- description: The backup job ID.
- returned: on success
- type: str
- sample: backup-83831498-c631
- mode:
- description: The backup job mode such as snapshot.
- returned: on success
- type: str
- sample: snapshot
- next-run:
- description: The next backup time.
- returned: on success
- type: str
- sample: "2024-12-28 11:30:00"
- schedule:
- description: The backup job schedule.
- returned: on success
- type: str
- sample: "sat 15:00"
- storage:
- description: The backup storage location.
- returned: on success
- type: str
- sample: local
- vm_name:
- description: The VM name.
- returned: on success
- type: str
- sample: test01
- vmid:
- description: The VM ID.
- returned: on success
- type: str
- sample: "100"
-"""
-
-from datetime import datetime
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
-
-
-class ProxmoxBackupInfoAnsible(ProxmoxAnsible):
-
- # Get all backup information
- def get_jobs_list(self):
- try:
- backupJobs = self.proxmox_api.cluster.backup.get()
- except Exception as e:
- self.module.fail_json(msg="Getting backup jobs failed: %s" % e)
- return backupJobs
-
- # Get VM information
- def get_vms_list(self):
- try:
- vms = self.proxmox_api.cluster.resources.get(type='vm')
- except Exception as e:
- self.module.fail_json(msg="Getting VMs info from cluster failed: %s" % e)
- return vms
-
- # Get all backup information by VM ID and VM name
- def vms_backup_info(self):
- backupList = self.get_jobs_list()
- vmInfo = self.get_vms_list()
- bkInfo = []
- for backupItem in backupList:
- nextrun = datetime.fromtimestamp(backupItem['next-run'])
- vmids = backupItem['vmid'].split(',')
- for vmid in vmids:
- for vm in vmInfo:
- if vm['vmid'] == int(vmid):
- vmName = vm['name']
- break
- bkInfoData = {'id': backupItem['id'],
- 'schedule': backupItem['schedule'],
- 'storage': backupItem['storage'],
- 'mode': backupItem['mode'],
- 'next-run': nextrun.strftime("%Y-%m-%d %H:%M:%S"),
- 'enabled': backupItem['enabled'],
- 'bktype': backupItem['type'],
- 'vmid': vmid,
- 'vm_name': vmName}
- bkInfo.append(bkInfoData)
- return bkInfo
-
- # Get proxmox backup information for a specific VM based on its VM ID or VM name
- def specific_vmbackup_info(self, vm_name_id):
- fullBackupInfo = self.vms_backup_info()
- vmBackupJobs = []
- for vm in fullBackupInfo:
- if (vm["vm_name"] == vm_name_id or vm["vmid"] == vm_name_id):
- vmBackupJobs.append(vm)
- return vmBackupJobs
-
-
-def main():
- # Define module args
- args = proxmox_auth_argument_spec()
- backup_info_args = dict(
- vm_id=dict(type='str'),
- vm_name=dict(type='str'),
- backup_jobs=dict(type='bool', default=False)
- )
- args.update(backup_info_args)
-
- module = AnsibleModule(
- argument_spec=args,
- mutually_exclusive=[('backup_jobs', 'vm_id', 'vm_name')],
- supports_check_mode=True
- )
-
- # Define (init) result value
- result = dict(
- changed=False
- )
-
- # Check if proxmoxer exist
- if not HAS_PROXMOXER:
- module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
-
- # Start to connect to proxmox to get backup data
- proxmox = ProxmoxBackupInfoAnsible(module)
- vm_id = module.params['vm_id']
- vm_name = module.params['vm_name']
- backup_jobs = module.params['backup_jobs']
-
- # Update result value based on what requested (module args)
- if backup_jobs:
- result['backup_info'] = proxmox.get_jobs_list()
- elif vm_id:
- result['backup_info'] = proxmox.specific_vmbackup_info(vm_id)
- elif vm_name:
- result['backup_info'] = proxmox.specific_vmbackup_info(vm_name)
- else:
- result['backup_info'] = proxmox.vms_backup_info()
-
- # Return result value
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_disk.py b/plugins/modules/proxmox_disk.py
deleted file mode 100644
index 75eb0001e6..0000000000
--- a/plugins/modules/proxmox_disk.py
+++ /dev/null
@@ -1,877 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2022, Castor Sky (@castorsky)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r"""
-module: proxmox_disk
-short_description: Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster
-version_added: 5.7.0
-description:
- - Allows you to perform some supported operations on a disk in Qemu(KVM) Virtual Machines in a Proxmox VE cluster.
-author: "Castor Sky (@castorsky) "
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
- action_group:
- version_added: 9.0.0
-options:
- name:
- description:
- - The unique name of the VM.
- - You can specify either O(name) or O(vmid) or both of them.
- type: str
- vmid:
- description:
- - The unique ID of the VM.
- - You can specify either O(vmid) or O(name) or both of them.
- type: int
- disk:
- description:
- - The disk key (V(unused[n]), V(ide[n]), V(sata[n]), V(scsi[n]) or V(virtio[n])) you want to operate on.
- - Disk buses (IDE, SATA and so on) have fixed ranges of V(n) that accepted by Proxmox API.
- - 'For IDE: 0-3; for SCSI: 0-30; for SATA: 0-5; for VirtIO: 0-15; for Unused: 0-255.'
- type: str
- required: true
- state:
- description:
- - Indicates desired state of the disk.
- - O(state=present) can be used to create, replace disk or update options in existing disk. It will create missing disk
- or update options in existing one by default. See the O(create) parameter description to control behavior of this
- option.
- - Some updates on options (like O(cache)) are not being applied instantly and require VM restart.
- - Use O(state=detached) to detach existing disk from VM but do not remove it entirely. When O(state=detached) and disk
- is V(unused[n]) it will be left in same state (not removed).
- - O(state=moved) may be used to change backing storage for the disk in bounds of the same VM or to send the disk to
- another VM (using the same backing storage).
- - O(state=resized) intended to change the disk size. As of Proxmox 7.2 you can only increase the disk size because shrinking
- disks is not supported by the PVE API and has to be done manually.
- - To entirely remove the disk from backing storage use O(state=absent).
- type: str
- choices: ['present', 'resized', 'detached', 'moved', 'absent']
- default: present
- create:
- description:
- - With O(create) flag you can control behavior of O(state=present).
- - When O(create=disabled) it will not create new disk (if not exists) but will update options in existing disk.
- - When O(create=regular) it will either create new disk (if not exists) or update options in existing disk.
- - When O(create=forced) it will always create new disk (if disk exists it will be detached and left unused).
- type: str
- choices: ['disabled', 'regular', 'forced']
- default: regular
- storage:
- description:
- - The drive's backing storage.
- - Used only when O(state) is V(present).
- type: str
- size:
- description:
- - Desired volume size in GB to allocate when O(state=present) (specify O(size) without suffix).
- - New (or additional) size of volume when O(state=resized). With the V(+) sign the value is added to the actual size
- of the volume and without it, the value is taken as an absolute one.
- type: str
- bwlimit:
- description:
- - Override I/O bandwidth limit (in KB/s).
- - Used only when O(state=moved).
- type: int
- delete_moved:
- description:
- - Delete the original disk after successful copy.
- - By default the original disk is kept as unused disk.
- - Used only when O(state=moved).
- type: bool
- target_disk:
- description:
- - The config key the disk will be moved to on the target VM (for example, V(ide0) or V(scsi1)).
- - Default is the source disk key.
- - Used only when O(state=moved).
- type: str
- target_storage:
- description:
- - Move the disk to this storage when O(state=moved).
- - You can move between storages only in scope of one VM.
- - Mutually exclusive with O(target_vmid).
- - Consider increasing O(timeout) in case of large disk images or slow storage backend.
- type: str
- target_vmid:
- description:
- - The (unique) ID of the VM where disk will be placed when O(state=moved).
- - You can move disk between VMs only when the same storage is used.
- - Mutually exclusive with O(target_vmid).
- type: int
- timeout:
- description:
- - Timeout in seconds to wait for slow operations such as importing disk or moving disk between storages.
- - Used only when O(state) is V(present) or V(moved).
- type: int
- default: 600
- aio:
- description:
- - AIO type to use.
- type: str
- choices: ['native', 'threads', 'io_uring']
- backup:
- description:
- - Whether the drive should be included when making backups.
- type: bool
- bps_max_length:
- description:
- - Maximum length of total r/w I/O bursts in seconds.
- type: int
- bps_rd_max_length:
- description:
- - Maximum length of read I/O bursts in seconds.
- type: int
- bps_wr_max_length:
- description:
- - Maximum length of write I/O bursts in seconds.
- type: int
- cache:
- description:
- - The drive's cache mode.
- type: str
- choices: ['none', 'writethrough', 'writeback', 'unsafe', 'directsync']
- cyls:
- description:
- - Force the drive's physical geometry to have a specific cylinder count.
- type: int
- detect_zeroes:
- description:
- - Control whether to detect and try to optimize writes of zeroes.
- type: bool
- discard:
- description:
- - Control whether to pass discard/trim requests to the underlying storage.
- type: str
- choices: ['ignore', 'on']
- format:
- description:
- - The drive's backing file's data format.
- type: str
- choices: ['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']
- heads:
- description:
- - Force the drive's physical geometry to have a specific head count.
- type: int
- import_from:
- description:
- - Import volume from this existing one.
- - Volume string format.
- - V(:/) or V(/).
- - Attention! Only root can use absolute paths.
- - This parameter is mutually exclusive with O(size).
- - Increase O(timeout) parameter when importing large disk images or using slow storage.
- type: str
- iops:
- description:
- - Maximum total r/w I/O in operations per second.
- - You can specify either total limit or per operation (mutually exclusive with O(iops_rd) and O(iops_wr)).
- type: int
- iops_max:
- description:
- - Maximum unthrottled total r/w I/O pool in operations per second.
- type: int
- iops_max_length:
- description:
- - Maximum length of total r/w I/O bursts in seconds.
- type: int
- iops_rd:
- description:
- - Maximum read I/O in operations per second.
- - You can specify either read or total limit (mutually exclusive with O(iops)).
- type: int
- iops_rd_max:
- description:
- - Maximum unthrottled read I/O pool in operations per second.
- type: int
- iops_rd_max_length:
- description:
- - Maximum length of read I/O bursts in seconds.
- type: int
- iops_wr:
- description:
- - Maximum write I/O in operations per second.
- - You can specify either write or total limit (mutually exclusive with O(iops)).
- type: int
- iops_wr_max:
- description:
- - Maximum unthrottled write I/O pool in operations per second.
- type: int
- iops_wr_max_length:
- description:
- - Maximum length of write I/O bursts in seconds.
- type: int
- iothread:
- description:
- - Whether to use iothreads for this drive (only for SCSI and VirtIO).
- type: bool
- mbps:
- description:
- - Maximum total r/w speed in megabytes per second.
- - Can be fractional but use with caution - fractionals less than 1 are not supported officially.
- - You can specify either total limit or per operation (mutually exclusive with O(mbps_rd) and O(mbps_wr)).
- type: float
- mbps_max:
- description:
- - Maximum unthrottled total r/w pool in megabytes per second.
- type: float
- mbps_rd:
- description:
- - Maximum read speed in megabytes per second.
- - You can specify either read or total limit (mutually exclusive with O(mbps)).
- type: float
- mbps_rd_max:
- description:
- - Maximum unthrottled read pool in megabytes per second.
- type: float
- mbps_wr:
- description:
- - Maximum write speed in megabytes per second.
- - You can specify either write or total limit (mutually exclusive with O(mbps)).
- type: float
- mbps_wr_max:
- description:
- - Maximum unthrottled write pool in megabytes per second.
- type: float
- media:
- description:
- - The drive's media type.
- type: str
- choices: ['cdrom', 'disk']
- iso_image:
- description:
- - The ISO image to be mounted on the specified in O(disk) CD-ROM.
- - O(media=cdrom) needs to be specified for this option to work.
- - Use V(:iso/) to mount ISO.
- - Use V(cdrom) to access the physical CD/DVD drive.
- - Use V(none) to unmount image from existent CD-ROM or create empty CD-ROM drive.
- type: str
- version_added: 8.1.0
- queues:
- description:
- - Number of queues (SCSI only).
- type: int
- replicate:
- description:
- - Whether the drive should considered for replication jobs.
- type: bool
- rerror:
- description:
- - Read error action.
- type: str
- choices: ['ignore', 'report', 'stop']
- ro:
- description:
- - Whether the drive is read-only.
- type: bool
- scsiblock:
- description:
- - Whether to use scsi-block for full passthrough of host block device.
- - Can lead to I/O errors in combination with low memory or high memory fragmentation on host.
- type: bool
- secs:
- description:
- - Force the drive's physical geometry to have a specific sector count.
- type: int
- serial:
- description:
- - The drive's reported serial number, url-encoded, up to 20 bytes long.
- type: str
- shared:
- description:
- - Mark this locally-managed volume as available on all nodes.
- - This option does not share the volume automatically, it assumes it is shared already!
- type: bool
- snapshot:
- description:
- - Control qemu's snapshot mode feature.
- - If activated, changes made to the disk are temporary and will be discarded when the VM is shutdown.
- type: bool
- ssd:
- description:
- - Whether to expose this drive as an SSD, rather than a rotational hard disk.
- type: bool
- trans:
- description:
- - Force disk geometry bios translation mode.
- type: str
- choices: ['auto', 'lba', 'none']
- werror:
- description:
- - Write error action.
- type: str
- choices: ['enospc', 'ignore', 'report', 'stop']
- wwn:
- description:
- - The drive's worldwide name, encoded as 16 bytes hex string, prefixed by V(0x).
- type: str
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
-"""
-
-EXAMPLES = r"""
-- name: Create new disk in VM (do not rewrite in case it exists already)
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- name: vm-name
- disk: scsi3
- backup: true
- cache: none
- storage: local-zfs
- size: 5
- state: present
-
-- name: Create new disk in VM (force rewrite in case it exists already)
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- vmid: 101
- disk: scsi3
- format: qcow2
- storage: local
- size: 16
- create: forced
- state: present
-
-- name: Update existing disk
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- vmid: 101
- disk: ide0
- backup: false
- ro: true
- aio: native
- state: present
-
-- name: Grow existing disk
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- vmid: 101
- disk: sata4
- size: +5G
- state: resized
-
-- name: Detach disk (leave it unused)
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- name: vm-name
- disk: virtio0
- state: detached
-
-- name: Move disk to another storage
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_password: secret
- vmid: 101
- disk: scsi7
- target_storage: local
- format: qcow2
- state: moved
-
-- name: Move disk from one VM to another
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- vmid: 101
- disk: scsi7
- target_vmid: 201
- state: moved
-
-- name: Remove disk permanently
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_password: secret
- vmid: 101
- disk: scsi4
- state: absent
-
-- name: Mount ISO image on CD-ROM (create drive if missing)
- community.general.proxmox_disk:
- api_host: node1
- api_user: root@pam
- api_token_id: token1
- api_token_secret: some-token-data
- vmid: 101
- disk: ide2
- media: cdrom
- iso_image: local:iso/favorite_distro_amd64.iso
- state: present
-"""
-
-RETURN = r"""
-vmid:
- description: The VM vmid.
- returned: success
- type: int
- sample: 101
-msg:
- description: A short message on what the module did.
- returned: always
- type: str
- sample: "Disk scsi3 created in VM 101"
-"""
-
-from ansible.module_utils.basic import AnsibleModule
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec,
- ProxmoxAnsible)
-from re import compile, match, sub
-
-
-def disk_conf_str_to_dict(config_string):
- """
- Transform Proxmox configuration string for disk element into dictionary which has
- volume option parsed in '{ storage }:{ volume }' format and other options parsed
- in '{ option }={ value }' format. This dictionary will be compared afterward with
- attributes that user passed to this module in playbook.\n
- config_string examples:
- - local-lvm:vm-100-disk-0,ssd=1,discard=on,size=25G
- - local:iso/new-vm-ignition.iso,media=cdrom,size=70k
- - none,media=cdrom
- :param config_string: Retrieved from Proxmox API configuration string
- :return: Dictionary with volume option divided into parts ('volume_name', 'storage_name', 'volume') \n
- and other options as key:value.
- """
- config = config_string.split(',')
-
- # When empty CD-ROM drive present, the volume part of config string is "none".
- storage_volume = config.pop(0)
- if storage_volume in ["none", "cdrom"]:
- config_current = dict(
- volume=storage_volume,
- storage_name=None,
- volume_name=None,
- size=None,
- )
- else:
- storage_volume = storage_volume.split(':')
- storage_name = storage_volume[0]
- volume_name = storage_volume[1]
- config_current = dict(
- volume='%s:%s' % (storage_name, volume_name),
- storage_name=storage_name,
- volume_name=volume_name,
- )
-
- config.sort()
- for option in config:
- k, v = option.split('=')
- config_current[k] = v
-
- return config_current
-
-
-class ProxmoxDiskAnsible(ProxmoxAnsible):
- create_update_fields = [
- 'aio', 'backup', 'bps_max_length', 'bps_rd_max_length', 'bps_wr_max_length',
- 'cache', 'cyls', 'detect_zeroes', 'discard', 'format', 'heads', 'import_from', 'iops', 'iops_max',
- 'iops_max_length', 'iops_rd', 'iops_rd_max', 'iops_rd_max_length', 'iops_wr', 'iops_wr_max',
- 'iops_wr_max_length', 'iothread', 'mbps', 'mbps_max', 'mbps_rd', 'mbps_rd_max', 'mbps_wr', 'mbps_wr_max',
- 'media', 'queues', 'replicate', 'rerror', 'ro', 'scsiblock', 'secs', 'serial', 'shared', 'snapshot',
- 'ssd', 'trans', 'werror', 'wwn'
- ]
- supported_bus_num_ranges = dict(
- ide=range(0, 4),
- scsi=range(0, 31),
- sata=range(0, 6),
- virtio=range(0, 16),
- unused=range(0, 256)
- )
-
- def get_create_attributes(self):
- # Sanitize parameters dictionary:
- # - Remove not defined args
- # - Ensure True and False converted to int.
- # - Remove unnecessary parameters
- params = {
- k: int(v) if isinstance(v, bool) else v
- for k, v in self.module.params.items()
- if v is not None and k in self.create_update_fields
- }
- return params
-
- def create_disk(self, disk, vmid, vm, vm_config):
- """Create a disk in the specified virtual machine. Check if creation is required,
- and if so, compile the disk configuration and create it by updating the virtual
- machine configuration. After calling the API function, wait for the result.
-
- :param disk: ID of the disk in format "".
- :param vmid: ID of the virtual machine where the disk will be created.
- :param vm: Name of the virtual machine where the disk will be created.
- :param vm_config: Configuration of the virtual machine.
- :return: (bool, string) Whether the task was successful or not
- and the message to return to Ansible.
- """
- create = self.module.params['create']
- if create == 'disabled' and disk not in vm_config:
- # NOOP
- return False, "Disk %s not found in VM %s and creation was disabled in parameters." % (disk, vmid)
-
- timeout_str = "Reached timeout. Last line in task before timeout: %s"
- if (create == 'regular' and disk not in vm_config) or (create == 'forced'):
- # CREATE
- playbook_config = self.get_create_attributes()
- import_string = playbook_config.pop('import_from', None)
- iso_image = self.module.params.get('iso_image', None)
-
- if import_string:
- # When 'import_from' option is present in task options.
- config_str = "%s:%s,import-from=%s" % (self.module.params["storage"], "0", import_string)
- timeout_str = "Reached timeout while importing VM disk. Last line in task before timeout: %s"
- ok_str = "Disk %s imported into VM %s"
- elif iso_image is not None:
- # disk=, media=cdrom, iso_image=
- config_str = iso_image
- ok_str = "CD-ROM was created on %s bus in VM %s"
- else:
- config_str = self.module.params["storage"]
- if not config_str:
- self.module.fail_json(msg="The storage option must be specified.")
- if self.module.params.get("media") != "cdrom":
- config_str += ":%s" % (self.module.params["size"])
- ok_str = "Disk %s created in VM %s"
- timeout_str = "Reached timeout while creating VM disk. Last line in task before timeout: %s"
-
- for k, v in playbook_config.items():
- config_str += ',%s=%s' % (k, v)
-
- disk_config_to_apply = {self.module.params["disk"]: config_str}
-
- if create in ['disabled', 'regular'] and disk in vm_config:
- # UPDATE
- ok_str = "Disk %s updated in VM %s"
- iso_image = self.module.params.get('iso_image', None)
-
- proxmox_config = disk_conf_str_to_dict(vm_config[disk])
- # 'import_from' fails on disk updates
- playbook_config = self.get_create_attributes()
- playbook_config.pop('import_from', None)
-
- # Begin composing configuration string
- if iso_image is not None:
- config_str = iso_image
- else:
- config_str = proxmox_config["volume"]
- # Append all mandatory fields from playbook_config
- for k, v in playbook_config.items():
- config_str += ',%s=%s' % (k, v)
-
- # Append to playbook_config fields which are constants for disk images
- for option in ['size', 'storage_name', 'volume', 'volume_name']:
- playbook_config.update({option: proxmox_config[option]})
- # CD-ROM is special disk device and its disk image is subject to change
- if iso_image is not None:
- playbook_config['volume'] = iso_image
- # Values in params are numbers, but strings are needed to compare with disk_config
- playbook_config = {k: str(v) for k, v in playbook_config.items()}
-
- # Now compare old and new config to detect if changes are needed
- if proxmox_config == playbook_config:
- return False, "Disk %s is up to date in VM %s" % (disk, vmid)
-
- disk_config_to_apply = {self.module.params["disk"]: config_str}
-
- current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.post(**disk_config_to_apply)
- task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout'])
-
- if task_success:
- return True, ok_str % (disk, vmid)
- else:
- if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT:
- self.module.fail_json(
- msg=timeout_str % self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1]
- )
- else:
- self.module.fail_json(msg="Error occurred on task execution: %s" % fail_reason)
-
- def move_disk(self, disk, vmid, vm, vm_config):
- """Call the `move_disk` API function that moves the disk to another storage and wait for the result.
-
- :param disk: ID of disk in format "".
- :param vmid: ID of virtual machine which disk will be moved.
- :param vm: Name of virtual machine which disk will be moved.
- :param vm_config: Virtual machine configuration.
- :return: (bool, string) Whether the task was successful or not
- and the message to return to Ansible.
- """
- disk_config = disk_conf_str_to_dict(vm_config[disk])
- disk_storage = disk_config["storage_name"]
-
- params = dict()
- params['disk'] = disk
- params['vmid'] = vmid
- params['bwlimit'] = self.module.params['bwlimit']
- params['storage'] = self.module.params['target_storage']
- params['target-disk'] = self.module.params['target_disk']
- params['target-vmid'] = self.module.params['target_vmid']
- params['format'] = self.module.params['format']
- params['delete'] = 1 if self.module.params.get('delete_moved', False) else 0
- # Remove not defined args
- params = {k: v for k, v in params.items() if v is not None}
-
- if params.get('storage', False):
- # Check if the disk is already in the target storage.
- disk_config = disk_conf_str_to_dict(vm_config[disk])
- if params['storage'] == disk_config['storage_name']:
- return False, "Disk %s already at %s storage" % (disk, disk_storage)
-
- current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).move_disk.post(**params)
- task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout'])
-
- if task_success:
- return True, "Disk %s moved from VM %s storage %s" % (disk, vmid, disk_storage)
- else:
- if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT:
- self.module.fail_json(
- msg='Reached timeout while waiting for moving VM disk. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1]
- )
- else:
- self.module.fail_json(msg="Error occurred on task execution: %s" % fail_reason)
-
- def resize_disk(self, disk, vmid, vm, vm_config):
- """Call the `resize` API function to change the disk size and wait for the result.
-
- :param disk: ID of disk in format "".
- :param vmid: ID of virtual machine which disk will be resized.
- :param vm: Name of virtual machine which disk will be resized.
- :param vm_config: Virtual machine configuration.
- :return: (Bool, string) Whether the task was successful or not
- and the message to return to Ansible.
- """
- size = self.module.params['size']
- if not match(r'^\+?\d+(\.\d+)?[KMGT]?$', size):
- self.module.fail_json(msg="Unrecognized size pattern for disk %s: %s" % (disk, size))
- disk_config = disk_conf_str_to_dict(vm_config[disk])
- actual_size = disk_config['size']
- if size == actual_size:
- return False, "Disk %s is already %s size" % (disk, size)
-
- # Resize disk API endpoint has changed at v8.0: PUT method become async.
- version = self.version()
- pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0]
- if pve_major_version >= 8:
- current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(disk=disk, size=size)
- task_success, fail_reason = self.api_task_complete(vm['node'], current_task_id, self.module.params['timeout'])
- if task_success:
- return True, "Disk %s resized in VM %s" % (disk, vmid)
- else:
- if fail_reason == ProxmoxAnsible.TASK_TIMED_OUT:
- self.module.fail_json(
- msg="Reached timeout while resizing disk. Last line in task before timeout: %s" %
- self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1]
- )
- else:
- self.module.fail_json(msg="Error occurred on task execution: %s" % fail_reason)
- else:
- self.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(disk=disk, size=size)
- return True, "Disk %s resized in VM %s" % (disk, vmid)
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- disk_args = dict(
- # Proxmox native parameters
- aio=dict(type='str', choices=['native', 'threads', 'io_uring']),
- backup=dict(type='bool'),
- bps_max_length=dict(type='int'),
- bps_rd_max_length=dict(type='int'),
- bps_wr_max_length=dict(type='int'),
- cache=dict(type='str', choices=['none', 'writethrough', 'writeback', 'unsafe', 'directsync']),
- cyls=dict(type='int'),
- detect_zeroes=dict(type='bool'),
- discard=dict(type='str', choices=['ignore', 'on']),
- format=dict(type='str', choices=['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']),
- heads=dict(type='int'),
- import_from=dict(type='str'),
- iops=dict(type='int'),
- iops_max=dict(type='int'),
- iops_max_length=dict(type='int'),
- iops_rd=dict(type='int'),
- iops_rd_max=dict(type='int'),
- iops_rd_max_length=dict(type='int'),
- iops_wr=dict(type='int'),
- iops_wr_max=dict(type='int'),
- iops_wr_max_length=dict(type='int'),
- iothread=dict(type='bool'),
- iso_image=dict(type='str'),
- mbps=dict(type='float'),
- mbps_max=dict(type='float'),
- mbps_rd=dict(type='float'),
- mbps_rd_max=dict(type='float'),
- mbps_wr=dict(type='float'),
- mbps_wr_max=dict(type='float'),
- media=dict(type='str', choices=['cdrom', 'disk']),
- queues=dict(type='int'),
- replicate=dict(type='bool'),
- rerror=dict(type='str', choices=['ignore', 'report', 'stop']),
- ro=dict(type='bool'),
- scsiblock=dict(type='bool'),
- secs=dict(type='int'),
- serial=dict(type='str'),
- shared=dict(type='bool'),
- snapshot=dict(type='bool'),
- ssd=dict(type='bool'),
- trans=dict(type='str', choices=['auto', 'lba', 'none']),
- werror=dict(type='str', choices=['enospc', 'ignore', 'report', 'stop']),
- wwn=dict(type='str'),
-
- # Disk moving relates parameters
- bwlimit=dict(type='int'),
- target_storage=dict(type='str'),
- target_disk=dict(type='str'),
- target_vmid=dict(type='int'),
- delete_moved=dict(type='bool'),
- timeout=dict(type='int', default='600'),
-
- # Module related parameters
- name=dict(type='str'),
- vmid=dict(type='int'),
- disk=dict(type='str', required=True),
- storage=dict(type='str'),
- size=dict(type='str'),
- state=dict(type='str', choices=['present', 'resized', 'detached', 'moved', 'absent'],
- default='present'),
- create=dict(type='str', choices=['disabled', 'regular', 'forced'], default='regular'),
- )
-
- module_args.update(disk_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_together=[('api_token_id', 'api_token_secret')],
- required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
- required_if=[
- ('create', 'forced', ['storage']),
- ('state', 'resized', ['size']),
- ],
- required_by={
- 'target_disk': 'target_vmid',
- 'mbps_max': 'mbps',
- 'mbps_rd_max': 'mbps_rd',
- 'mbps_wr_max': 'mbps_wr',
- 'bps_max_length': 'mbps_max',
- 'bps_rd_max_length': 'mbps_rd_max',
- 'bps_wr_max_length': 'mbps_wr_max',
- 'iops_max': 'iops',
- 'iops_rd_max': 'iops_rd',
- 'iops_wr_max': 'iops_wr',
- 'iops_max_length': 'iops_max',
- 'iops_rd_max_length': 'iops_rd_max',
- 'iops_wr_max_length': 'iops_wr_max',
- 'iso_image': 'media',
- },
- supports_check_mode=False,
- mutually_exclusive=[
- ('target_vmid', 'target_storage'),
- ('mbps', 'mbps_rd'),
- ('mbps', 'mbps_wr'),
- ('iops', 'iops_rd'),
- ('iops', 'iops_wr'),
- ('import_from', 'size'),
- ]
- )
-
- proxmox = ProxmoxDiskAnsible(module)
-
- disk = module.params['disk']
- # Verify disk name has appropriate name
- disk_regex = compile(r'^([a-z]+)([0-9]+)$')
- disk_bus = sub(disk_regex, r'\1', disk)
- disk_number = int(sub(disk_regex, r'\2', disk))
- if disk_bus not in proxmox.supported_bus_num_ranges:
- proxmox.module.fail_json(msg='Unsupported disk bus: %s' % disk_bus)
- elif disk_number not in proxmox.supported_bus_num_ranges[disk_bus]:
- bus_range = proxmox.supported_bus_num_ranges[disk_bus]
- proxmox.module.fail_json(msg='Disk %s number not in range %s..%s ' % (disk, bus_range[0], bus_range[-1]))
-
- name = module.params['name']
- state = module.params['state']
- vmid = module.params['vmid'] or proxmox.get_vmid(name)
-
- # Ensure VM id exists and retrieve its config
- vm = None
- vm_config = None
- try:
- vm = proxmox.get_vm(vmid)
- vm_config = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).config.get()
- except Exception as e:
- proxmox.module.fail_json(msg='Getting information for VM %s failed with exception: %s' % (vmid, str(e)))
-
- # Do not try to perform actions on missing disk
- if disk not in vm_config and state in ['resized', 'moved']:
- module.fail_json(vmid=vmid, msg='Unable to process missing disk %s in VM %s' % (disk, vmid))
-
- if state == 'present':
- try:
- changed, message = proxmox.create_disk(disk, vmid, vm, vm_config)
- module.exit_json(changed=changed, vmid=vmid, msg=message)
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to create/update disk %s in VM %s: %s' % (disk, vmid, str(e)))
-
- elif state == 'detached':
- try:
- if disk_bus == 'unused':
- module.exit_json(changed=False, vmid=vmid, msg='Disk %s already detached in VM %s' % (disk, vmid))
- if disk not in vm_config:
- module.exit_json(changed=False, vmid=vmid, msg="Disk %s not present in VM %s config" % (disk, vmid))
- proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=0)
- module.exit_json(changed=True, vmid=vmid, msg="Disk %s detached from VM %s" % (disk, vmid))
- except Exception as e:
- module.fail_json(msg="Failed to detach disk %s from VM %s with exception: %s" % (disk, vmid, str(e)))
-
- elif state == 'moved':
- try:
- changed, message = proxmox.move_disk(disk, vmid, vm, vm_config)
- module.exit_json(changed=changed, vmid=vmid, msg=message)
- except Exception as e:
- module.fail_json(msg="Failed to move disk %s in VM %s with exception: %s" % (disk, vmid, str(e)))
-
- elif state == 'resized':
- try:
- changed, message = proxmox.resize_disk(disk, vmid, vm, vm_config)
- module.exit_json(changed=changed, vmid=vmid, msg=message)
- except Exception as e:
- module.fail_json(msg="Failed to resize disk %s in VM %s with exception: %s" % (disk, vmid, str(e)))
-
- elif state == 'absent':
- try:
- if disk not in vm_config:
- module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already absent in VM %s" % (disk, vmid))
- proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=1)
- module.exit_json(changed=True, vmid=vmid, msg="Disk %s removed from VM %s" % (disk, vmid))
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to remove disk %s from VM %s: %s' % (disk, vmid, str(e)))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_domain_info.py b/plugins/modules/proxmox_domain_info.py
deleted file mode 100644
index d9836da277..0000000000
--- a/plugins/modules/proxmox_domain_info.py
+++ /dev/null
@@ -1,137 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Tristan Le Guern (@tleguern)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: proxmox_domain_info
-short_description: Retrieve information about one or more Proxmox VE domains
-version_added: 1.3.0
-description:
- - Retrieve information about one or more Proxmox VE domains.
-attributes:
- action_group:
- version_added: 9.0.0
-options:
- domain:
- description:
- - Restrict results to a specific authentication realm.
- aliases: ['realm', 'name']
- type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
-"""
-
-
-EXAMPLES = r"""
-- name: List existing domains
- community.general.proxmox_domain_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- register: proxmox_domains
-
-- name: Retrieve information about the pve domain
- community.general.proxmox_domain_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- domain: pve
- register: proxmox_domain_pve
-"""
-
-
-RETURN = r"""
-proxmox_domains:
- description: List of authentication domains.
- returned: always, but can be empty
- type: list
- elements: dict
- contains:
- comment:
- description: Short description of the realm.
- returned: on success
- type: str
- realm:
- description: Realm name.
- returned: on success
- type: str
- type:
- description: Realm type.
- returned: on success
- type: str
- digest:
- description: Realm hash.
- returned: on success, can be absent
- type: str
-"""
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxDomainInfoAnsible(ProxmoxAnsible):
- def get_domain(self, realm):
- try:
- domain = self.proxmox_api.access.domains.get(realm)
- except Exception:
- self.module.fail_json(msg="Domain '%s' does not exist" % realm)
- domain['realm'] = realm
- return domain
-
- def get_domains(self):
- domains = self.proxmox_api.access.domains.get()
- return domains
-
-
-def proxmox_domain_info_argument_spec():
- return dict(
- domain=dict(type='str', aliases=['realm', 'name']),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- domain_info_args = proxmox_domain_info_argument_spec()
- module_args.update(domain_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[('api_password', 'api_token_id')],
- required_together=[('api_token_id', 'api_token_secret')],
- supports_check_mode=True
- )
- result = dict(
- changed=False
- )
-
- proxmox = ProxmoxDomainInfoAnsible(module)
- domain = module.params['domain']
-
- if domain:
- domains = [proxmox.get_domain(realm=domain)]
- else:
- domains = proxmox.get_domains()
- result['proxmox_domains'] = domains
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_group_info.py b/plugins/modules/proxmox_group_info.py
deleted file mode 100644
index f62d467af8..0000000000
--- a/plugins/modules/proxmox_group_info.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Tristan Le Guern
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: proxmox_group_info
-short_description: Retrieve information about one or more Proxmox VE groups
-version_added: 1.3.0
-description:
- - Retrieve information about one or more Proxmox VE groups.
-attributes:
- action_group:
- version_added: 9.0.0
-options:
- group:
- description:
- - Restrict results to a specific group.
- aliases: ['groupid', 'name']
- type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
-"""
-
-
-EXAMPLES = r"""
-- name: List existing groups
- community.general.proxmox_group_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- register: proxmox_groups
-
-- name: Retrieve information about the admin group
- community.general.proxmox_group_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- group: admin
- register: proxmox_group_admin
-"""
-
-
-RETURN = r"""
-proxmox_groups:
- description: List of groups.
- returned: always, but can be empty
- type: list
- elements: dict
- contains:
- comment:
- description: Short description of the group.
- returned: on success, can be absent
- type: str
- groupid:
- description: Group name.
- returned: on success
- type: str
- users:
- description: List of users in the group.
- returned: on success
- type: list
- elements: str
-"""
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxGroupInfoAnsible(ProxmoxAnsible):
- def get_group(self, groupid):
- try:
- group = self.proxmox_api.access.groups.get(groupid)
- except Exception:
- self.module.fail_json(msg="Group '%s' does not exist" % groupid)
- group['groupid'] = groupid
- return ProxmoxGroup(group)
-
- def get_groups(self):
- groups = self.proxmox_api.access.groups.get()
- return [ProxmoxGroup(group) for group in groups]
-
-
-class ProxmoxGroup:
- def __init__(self, group):
- self.group = dict()
- # Data representation is not the same depending on API calls
- for k, v in group.items():
- if k == 'users' and isinstance(v, str):
- self.group['users'] = v.split(',')
- elif k == 'members':
- self.group['users'] = group['members']
- else:
- self.group[k] = v
-
-
-def proxmox_group_info_argument_spec():
- return dict(
- group=dict(type='str', aliases=['groupid', 'name']),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- group_info_args = proxmox_group_info_argument_spec()
- module_args.update(group_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[('api_password', 'api_token_id')],
- required_together=[('api_token_id', 'api_token_secret')],
- supports_check_mode=True
- )
- result = dict(
- changed=False
- )
-
- proxmox = ProxmoxGroupInfoAnsible(module)
- group = module.params['group']
-
- if group:
- groups = [proxmox.get_group(groupid=group)]
- else:
- groups = proxmox.get_groups()
- result['proxmox_groups'] = [group.group for group in groups]
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_kvm.py b/plugins/modules/proxmox_kvm.py
deleted file mode 100644
index c159ced6e6..0000000000
--- a/plugins/modules/proxmox_kvm.py
+++ /dev/null
@@ -1,1655 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2016, Abdoul Bah (@helldorado)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r"""
-module: proxmox_kvm
-short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster
-description:
- - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
- - Since community.general 4.0.0 on, there are no more default values.
-author: "Abdoul Bah (@helldorado) "
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
- action_group:
- version_added: 9.0.0
-options:
- archive:
- description:
- - Specify a path to an archive to restore (instead of creating or cloning a VM).
- type: str
- version_added: 6.5.0
- acpi:
- description:
- - Specify if ACPI should be enabled/disabled.
- type: bool
- agent:
- description:
- - Specify if the QEMU Guest Agent should be enabled/disabled.
- - Since community.general 5.5.0, this can also be a string instead of a boolean. This allows to specify values such
- as V(enabled=1,fstrim_cloned_disks=1).
- type: str
- args:
- description:
- - Pass arbitrary arguments to kvm.
- - This option is for experts only!
- type: str
- audio:
- description:
- - A hash/dictionary of audio devices for the VM. O(audio={"key":"value", "key":"value"}).
- - Keys allowed are - C(audio[n]) where 0 ≤ n ≤ N.
- - Values allowed are - C(device="ich9-intel-hda|intel-hda|AC97",driver="none|spice").
- - C(device) is either V(ich9-intel-hda) or V(intel-hda) or V(AC97).
- - Option C(driver) is V(none) or V(spice).
- type: dict
- version_added: 10.5.0
- autostart:
- description:
- - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
- type: bool
- balloon:
- description:
- - Specify the amount of RAM for the VM in MB.
- - Using zero disables the balloon driver.
- type: int
- bios:
- description:
- - Specify the BIOS implementation.
- type: str
- choices: ['seabios', 'ovmf']
- boot:
- description:
- - Specify the boot order -> boot on floppy V(a), hard disk V(c), CD-ROM V(d), or network V(n).
- - For newer versions of Proxmox VE, use a boot order like V(order=scsi0;net0;hostpci0).
- - You can combine to set order.
- type: str
- bootdisk:
- description:
- - Enable booting from specified disk. Format V((ide|sata|scsi|virtio\)\\d+).
- type: str
- cicustom:
- description:
- - 'Cloud-init: Specify custom files to replace the automatically generated ones at start.'
- type: str
- version_added: 1.3.0
- cipassword:
- description:
- - 'Cloud-init: password of default user to create.'
- type: str
- version_added: 1.3.0
- citype:
- description:
- - 'Cloud-init: Specifies the cloud-init configuration format.'
- - The default depends on the configured operating system type (V(ostype)).
- - We use the V(nocloud) format for Linux, and V(configdrive2) for Windows.
- type: str
- choices: ['nocloud', 'configdrive2']
- version_added: 1.3.0
- ciupgrade:
- description:
- - 'Cloud-init: do an automatic package upgrade after the first boot.'
- type: bool
- version_added: 10.0.0
- ciuser:
- description:
- - 'Cloud-init: username of default user to create.'
- type: str
- version_added: 1.3.0
- clone:
- description:
- - Name of VM to be cloned. If O(vmid) is set, O(clone) can take an arbitrary value but is required for initiating the
- clone.
- type: str
- cores:
- description:
- - Specify number of cores per socket.
- type: int
- cpu:
- description:
- - Specify emulated CPU type.
- type: str
- cpulimit:
- description:
- - Specify if CPU usage will be limited. Value V(0) indicates no CPU limit.
- - If the computer has 2 CPUs, it has total of '2' CPU time.
- type: int
- cpuunits:
- description:
- - Specify CPU weight for a VM.
- - You can disable fair-scheduler configuration by setting this to V(0).
- type: int
- delete:
- description:
- - Specify a list of settings you want to delete.
- type: str
- description:
- description:
- - Specify the description for the VM. Only used on the configuration web interface.
- - This is saved as comment inside the configuration file.
- type: str
- digest:
- description:
- - Specify if to prevent changes if current configuration file has different SHA1 digest.
- - This can be used to prevent concurrent modifications.
- type: str
- efidisk0:
- description:
- - Specify a hash/dictionary of EFI disk options.
- - Requires O(bios=ovmf) to be set to be able to use it.
- type: dict
- suboptions:
- storage:
- description:
- - V(storage) is the storage identifier where to create the disk.
- type: str
- format:
- description:
- - V(format) is the drive's backing file's data format. Please refer to the Proxmox VE Administrator Guide, section
- Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables
- 3 to 14) to find out format supported by the provided storage backend.
- type: str
- efitype:
- description:
- - V(efitype) indicates the size of the EFI disk.
- - V(2m) will allow for a 2MB EFI disk, which will be enough to persist boot order and new boot entries.
- - V(4m) will allow for a 4MB EFI disk, which will additionally allow to store EFI keys in order to enable Secure
- Boot.
- type: str
- choices:
- - 2m
- - 4m
- pre_enrolled_keys:
- description:
- - V(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled V(1) in the VM firmware upon
- creation or not (0).
- - If set to V(1), Secure Boot will also be enabled by default when the VM is created.
- type: bool
- version_added: 4.5.0
- force:
- description:
- - Allow to force stop VM.
- - Can be used with states V(stopped), V(restarted), and V(absent).
- - Requires parameter O(archive).
- type: bool
- format:
- description:
- - Target drive's backing file's data format.
- - Used only with clone.
- - Use O(format=unspecified) and O(full=false) for a linked clone.
- - Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html)
- for the latest version, tables 3 to 14) to find out format supported by the provided storage backend.
- - Not specifying this option is equivalent to setting it to V(unspecified).
- type: str
- choices: ["cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified"]
- freeze:
- description:
- - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution).
- type: bool
- full:
- description:
- - Create a full copy of all disk. This is always done when you clone a normal VM.
- - For VM templates, we try to create a linked clone by default.
- - Used only with clone.
- type: bool
- default: true
- hookscript:
- description:
- - Script that will be executed during various steps in the containers lifetime.
- type: str
- version_added: 8.1.0
- hostpci:
- description:
- - Specify a hash/dictionary of map host pci devices into guest. O(hostpci='{"key":"value", "key":"value"}').
- - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
- - Values allowed are - V("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
- - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is V(bus:dev.func) (hexadecimal numbers).
- - V(pcie=boolean) V(default=0) Choose the PCI-express bus (needs the q35 machine model).
- - V(rombar=boolean) V(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map.
- - V(x-vga=boolean) V(default=0) Enable vfio-vga device support.
- - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use
- with special care.
- type: dict
- hotplug:
- description:
- - Selectively enable hotplug features.
- - This is a comma separated list of hotplug features V(network), V(disk), V(cpu), V(memory), and V(usb).
- - Value 0 disables hotplug completely and value 1 is an alias for the default V(network,disk,usb).
- type: str
- hugepages:
- description:
- - Enable/disable hugepages memory.
- type: str
- choices: ['any', '2', '1024']
- ide:
- description:
- - A hash/dictionary of volume used as IDE hard disk or CD-ROM. O(ide='{"key":"value", "key":"value"}').
- - Keys allowed are - V(ide[n]) where 0 ≤ n ≤ 3.
- - Values allowed are - V("storage:size,format=value").
- - V(storage) is the storage identifier where to create the disk.
- - V(size) is the size of the disk in GB.
- - V(format) is the drive's backing file's data format. V(qcow2|raw|subvol). Please refer to the Proxmox VE Administrator
- Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version,
- tables 3 to 14) to find out format supported by the provided storage backend.
- type: dict
- ipconfig:
- description:
- - 'Cloud-init: Set the IP configuration.'
- - A hash/dictionary of network IP configurations. O(ipconfig='{"key":"value", "key":"value"}').
- - Keys allowed are - V(ipconfig[n]) where 0 ≤ n ≤ network interfaces.
- - Values allowed are - V("[gw=] [,gw6=] [,ip=] [,ip6=]").
- - 'Cloud-init: Specify IP addresses and gateways for the corresponding interface.'
- - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address.
- - The special string V(dhcp) can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided.
- - For IPv6 the special string V(auto) can be used to use stateless autoconfiguration.
- - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4.
- type: dict
- version_added: 1.3.0
- keyboard:
- description:
- - Sets the keyboard layout for VNC server.
- type: str
- kvm:
- description:
- - Enable/disable KVM hardware virtualization.
- type: bool
- localtime:
- description:
- - Sets the real time clock to local time.
- - This is enabled by default if ostype indicates a Microsoft OS.
- type: bool
- lock:
- description:
- - Lock/unlock the VM.
- type: str
- choices: ['migrate', 'backup', 'snapshot', 'rollback']
- machine:
- description:
- - Specifies the Qemu machine type.
- - Type => V((pc|pc(-i440fx\)?-\\d+\\.\\d+(\\.pxe\)?|q35|pc-q35-\\d+\\.\\d+(\\.pxe\)?\)).
- type: str
- memory:
- description:
- - Memory size in MB for instance.
- type: int
- migrate:
- description:
- - Migrate the VM to O(node) if it is on another node.
- type: bool
- default: false
- version_added: 7.0.0
- migrate_downtime:
- description:
- - Sets maximum tolerated downtime (in seconds) for migrations.
- type: int
- migrate_speed:
- description:
- - Sets maximum speed (in MB/s) for migrations.
- - A value of 0 is no limit.
- type: int
- name:
- description:
- - Specifies the VM name. Name could be non-unique across the cluster.
- - Required only for O(state=present).
- - With O(state=present) if O(vmid) not provided and VM with name exists in the cluster then no changes will be made.
- type: str
- nameservers:
- description:
- - 'Cloud-init: DNS server IP address(es).'
- - If unset, PVE host settings are used.
- type: list
- elements: str
- version_added: 1.3.0
- net:
- description:
- - A hash/dictionary of network interfaces for the VM. O(net='{"key":"value", "key":"value"}').
- - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N.
- - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid"").
- - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet
- rtl8139 virtio vmxnet3).
- - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified.
- - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard
- bridge is called 'vmbr0'.
- - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number,
- unit is 'Megabytes per second'.
- - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services.
- type: dict
- newid:
- description:
- - VMID for the clone. Used only with clone.
- - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI.
- type: int
- numa:
- description:
- - A hash/dictionaries of NUMA topology. O(numa='{"key":"value", "key":"value"}').
- - Keys allowed are - V(numa[n]) where 0 ≤ n ≤ N.
- - Values allowed are - V("cpu="",hostnodes="",memory="number",policy="(bind|interleave|preferred)"").
- - V(cpus) CPUs accessing this NUMA node.
- - V(hostnodes) Host NUMA nodes to use.
- - V(memory) Amount of memory this NUMA node provides.
- - V(policy) NUMA allocation policy.
- type: dict
- numa_enabled:
- description:
- - Enables NUMA.
- type: bool
- onboot:
- description:
- - Specifies whether a VM will be started during system bootup.
- type: bool
- ostype:
- description:
- - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
- - The l26 is Linux 2.6/3.X Kernel.
- type: str
- choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris']
- parallel:
- description:
- - A hash/dictionary of map host parallel devices. O(parallel='{"key":"value", "key":"value"}').
- - Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2.
- - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
- type: dict
- protection:
- description:
- - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
- type: bool
- reboot:
- description:
- - Allow reboot. If set to V(true), the VM exit on reboot.
- type: bool
- revert:
- description:
- - Revert a pending change.
- type: str
- sata:
- description:
- - A hash/dictionary of volume used as sata hard disk or CD-ROM. O(sata='{"key":"value", "key":"value"}').
- - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
- - Values allowed are - C("storage:size,format=value").
- - C(storage) is the storage identifier where to create the disk.
- - C(size) is the size of the disk in GB.
- - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE Administrator
- Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version,
- tables 3 to 14) to find out format supported by the provided storage backend.
- type: dict
- scsi:
- description:
- - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. O(scsi='{"key":"value", "key":"value"}').
- - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
- - Values allowed are - C("storage:size,format=value").
- - C(storage) is the storage identifier where to create the disk.
- - C(size) is the size of the disk in GB.
- - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE Administrator
- Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version,
- tables 3 to 14) to find out format supported by the provided storage backend.
- type: dict
- scsihw:
- description:
- - Specifies the SCSI controller model.
- type: str
- choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
- searchdomains:
- description:
- - 'Cloud-init: Sets DNS search domain(s).'
- - If unset, PVE host settings are used.
- type: list
- elements: str
- version_added: 1.3.0
- serial:
- description:
- - A hash/dictionary of serial device to create inside the VM. V('{"key":"value", "key":"value"}').
- - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
- - Values allowed are - V((/dev/.+|socket\)).
- - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special
- care.
- type: dict
- shares:
- description:
- - Rets amount of memory shares for auto-ballooning. (0 - 50000).
- - The larger the number is, the more memory this VM gets.
- - The number is relative to weights of all other running VMs.
- - Using 0 disables auto-ballooning, this means no limit.
- type: int
- skiplock:
- description:
- - Ignore locks.
- - Only root is allowed to use this option.
- type: bool
- smbios:
- description:
- - Specifies SMBIOS type 1 fields.
- - Comma separated, Base64 encoded (optional) SMBIOS properties:.
- - V([base64=<1|0>] [,family=]).
- - V([,manufacturer=]).
- - V([,product=]).
- - V([,serial=]).
- - V([,sku=]).
- - V([,uuid=]).
- - V([,version=]).
- type: str
- snapname:
- description:
- - The name of the snapshot. Used only with clone.
- type: str
- sockets:
- description:
- - Sets the number of CPU sockets. (1 - N).
- type: int
- sshkeys:
- description:
- - 'Cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.'
- type: str
- version_added: 1.3.0
- startdate:
- description:
- - Sets the initial date of the real time clock.
- - Valid format for date are V('now') or V('2016-09-25T16:01:21') or V('2016-09-25').
- type: str
- startup:
- description:
- - Startup and shutdown behavior. V([[order=]\\d+] [,up=\\d+] [,down=\\d+]).
- - Order is a non-negative number defining the general startup order.
- - Shutdown in done with reverse ordering.
- type: str
- state:
- description:
- - Indicates desired state of the instance.
- - If V(current), the current state of the VM will be fetched. You can access it with C(results.status).
- - V(template) was added in community.general 8.1.0.
- - V(paused) and V(hibernated) were added in community.general 10.4.0.
- type: str
- choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current', 'template', 'paused', 'hibernated']
- default: present
- storage:
- description:
- - Target storage for full clone.
- type: str
- tablet:
- description:
- - Enables/disables the USB tablet device.
- type: bool
- tags:
- description:
- - List of tags to apply to the VM instance.
- - Tags must start with V([a-z0-9_]) followed by zero or more of the following characters V([a-z0-9_-+.]).
- - Tags are only available in Proxmox 6+.
- type: list
- elements: str
- version_added: 2.3.0
- target:
- description:
- - Target node. Only allowed if the original VM is on shared storage.
- - Used only with clone.
- type: str
- tdf:
- description:
- - Enables/disables time drift fix.
- type: bool
- template:
- description:
- - Enables/disables the template.
- type: bool
- timeout:
- description:
- - Timeout for operations.
- - When used with O(state=stopped) the option sets a graceful timeout for VM stop after which a VM will be forcefully
- stopped.
- type: int
- default: 30
- tpmstate0:
- description:
- - A hash/dictionary of options for the Trusted Platform Module disk.
- - A TPM state disk is required for Windows 11 installations.
- suboptions:
- storage:
- description:
- - O(tpmstate0.storage) is the storage identifier where to create the disk.
- type: str
- required: true
- version:
- description:
- - The TPM version to use.
- type: str
- choices: ['1.2', '2.0']
- default: '2.0'
- type: dict
- version_added: 7.1.0
- usb:
- description:
- - A hash/dictionary of USB devices for the VM. O(usb='{"key":"value", "key":"value"}').
- - Keys allowed are - C(usb[n]) where 0 ≤ n ≤ N.
- - Values allowed are - C(host="value|spice",mapping="value",usb3="1|0").
- - Host is either C(spice) or the USB id/port.
- - Option C(mapping) is the mapped USB device name.
- - Option C(usb3) enables USB 3 support.
- type: dict
- version_added: 9.0.0
- update:
- description:
- - If V(true), the VM will be updated with new value.
- - Because of the operations of the API and security reasons, I have disabled the update of the following parameters
- O(net), O(virtio), O(ide), O(sata), O(scsi). Per example updating O(net) update the MAC address and O(virtio) create
- always new disk... This security feature can be disabled by setting the O(update_unsafe) to V(true).
- - Update of O(pool) is disabled. It needs an additional API endpoint not covered by this module.
- type: bool
- default: false
- update_unsafe:
- description:
- - If V(true), do not enforce limitations on parameters O(net), O(virtio), O(ide), O(sata), O(scsi), O(efidisk0), and
- O(tpmstate0). Use this option with caution because an improper configuration might result in a permanent loss of data
- (for example disk recreated).
- type: bool
- default: false
- version_added: 8.4.0
- vcpus:
- description:
- - Sets number of hotplugged vcpus.
- type: int
- vga:
- description:
- - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option V(std) or V(vmware).
- type: str
- choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
- virtio:
- description:
- - A hash/dictionary of volume used as VIRTIO hard disk. O(virtio='{"key":"value", "key":"value"}').
- - Keys allowed are - V(virtio[n]) where 0 ≤ n ≤ 15.
- - Values allowed are - V(storage:size,format=value).
- - V(storage) is the storage identifier where to create the disk.
- - V(size) is the size of the disk in GB.
- - V(format) is the drive's backing file's data format. V(qcow2|raw|subvol). Please refer to the Proxmox VE Administrator
- Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version,
- tables 3 to 14) to find out format supported by the provided storage backend.
- type: dict
- watchdog:
- description:
- - Creates a virtual hardware watchdog device.
- type: str
-seealso:
- - module: community.general.proxmox_vm_info
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.proxmox.selection
- - community.general.attributes
-"""
-
-EXAMPLES = r"""
-- name: Create new VM with minimal options
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
-
-- name: Create a VM from archive (backup)
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- archive: backup-storage:backup/vm/140/2023-03-08T06:41:23Z
- name: spynal
-
-- name: Create new VM with minimal options and given vmid
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- vmid: 100
-
-- name: Create new VM with two network interface options
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- net:
- net0: 'virtio,bridge=vmbr1,rate=200'
- net1: 'e1000,bridge=vmbr2'
-
-- name: Create new VM with one network interface, three virto hard disk, 4 cores, and 2 vcpus
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- net:
- net0: 'virtio,bridge=vmbr1,rate=200'
- virtio:
- virtio0: 'VMs_LVM:10'
- virtio1: 'VMs:2,format=qcow2'
- virtio2: 'VMs:5,format=raw'
- cores: 4
- vcpus: 2
-
-- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot disabled by default
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- sata:
- sata0: 'VMs_LVM:10,format=raw'
- bios: ovmf
- efidisk0:
- storage: VMs_LVM_thin
- format: raw
- efitype: 4m
- pre_enrolled_keys: false
-
-- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot enabled by default
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- sata:
- sata0: 'VMs_LVM:10,format=raw'
- bios: ovmf
- efidisk0:
- storage: VMs_LVM
- format: raw
- efitype: 4m
- pre_enrolled_keys: 1
-
-- name: >
- Clone VM with only source VM name.
- The VM source is spynal.
- The target VM name is zavala
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- clone: spynal
- name: zavala
- node: sabrewulf
- storage: VMs
- format: qcow2
- timeout: 500
-
-- name: >
- Create linked clone VM with only source VM name.
- The VM source is spynal.
- The target VM name is zavala
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- clone: spynal
- name: zavala
- node: sabrewulf
- storage: VMs
- full: false
- format: unspecified
- timeout: 500
-
-- name: Clone VM with source vmid and target newid and raw format
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- clone: arbitrary_name
- vmid: 108
- newid: 152
- name: zavala
- node: sabrewulf
- storage: LVM_STO
- format: raw
- timeout: 300
-
-- name: Create new VM and lock it for snapshot
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- lock: snapshot
-
-- name: Create new VM and set protection to disable the remove VM and remove disk operations
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- protection: true
-
-- name: Create new VM using cloud-init with a username and password
- community.general.proxmox_kvm:
- node: sabrewulf
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- ide:
- ide2: 'local:cloudinit,format=qcow2'
- ciuser: mylinuxuser
- cipassword: supersecret
- searchdomains: 'mydomain.internal'
- nameservers: 1.1.1.1
- net:
- net0: 'virtio,bridge=vmbr1,tag=77'
- ipconfig:
- ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1'
-
-- name: Create new VM using Cloud-Init with an ssh key
- community.general.proxmox_kvm:
- node: sabrewulf
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- ide:
- ide2: 'local:cloudinit,format=qcow2'
- sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+'
- searchdomains: 'mydomain.internal'
- nameservers:
- - '1.1.1.1'
- - '8.8.8.8'
- net:
- net0: 'virtio,bridge=vmbr1,tag=77'
- ipconfig:
- ipconfig0: 'ip=192.168.1.1/24'
-
-- name: Start VM
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: started
-
-- name: Stop VM
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: stopped
-
-- name: Stop VM with force
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: stopped
- force: true
-
-- name: Restart VM
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: restarted
-
-- name: Convert VM to template
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: template
-
-- name: Convert VM to template (stop VM if running)
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: template
- force: true
-
-- name: Remove VM
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: absent
-
-- name: Get VM current state
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: current
-
-- name: Update VM configuration
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- cores: 8
- memory: 16384
- update: true
-
-- name: Update VM configuration (incl. unsafe options)
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- cores: 8
- memory: 16384
- net:
- net0: virtio,bridge=vmbr1
- update: true
- update_unsafe: true
-
-- name: Delete QEMU parameters
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- delete: 'args,template,cpulimit'
-
-- name: Revert a pending change
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- revert: 'template,cpulimit'
-
-- name: Migrate VM on second node
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf-2
- migrate: true
-
-- name: Add hookscript to existing VM
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- vmid: 999
- node: sabrewulf
- hookscript: local:snippets/hookscript.pl
- update: true
-"""
-
-RETURN = r"""
-vmid:
- description: The VM vmid.
- returned: success
- type: int
- sample: 115
-status:
- description: The current virtual machine status.
- returned: success, not clone, not absent, not update
- type: str
- sample: running
-msg:
- description: A short message.
- returned: always
- type: str
- sample: "VM kropta with vmid = 110 is running"
-"""
-
-import re
-import time
-from ansible.module_utils.six.moves.urllib.parse import quote
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.parsing.convert_bool import boolean
-
-
-def parse_mac(netstr):
- return re.search('=(.*?),', netstr).group(1)
-
-
-def parse_dev(devstr):
- return re.search('(.*?)(,|$)', devstr).group(1)
-
-
-class ProxmoxKvmAnsible(ProxmoxAnsible):
- def get_vminfo(self, node, vmid, **kwargs):
- global results
- results = {}
- mac = {}
- devices = {}
- try:
- vm = self.proxmox_api.nodes(node).qemu(vmid).config.get()
- except Exception as e:
- self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
-
- # Sanitize kwargs. Remove not defined args and ensure True and False converted to int.
- kwargs = {k: v for k, v in kwargs.items() if v is not None}
-
- # Convert all dict in kwargs to elements.
- # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n]
- for k in list(kwargs.keys()):
- if isinstance(kwargs[k], dict):
- kwargs.update(kwargs[k])
- del kwargs[k]
-
- # Split information by type
- re_net = re.compile(r'net[0-9]')
- re_dev = re.compile(r'(virtio|ide|scsi|sata|efidisk)[0-9]')
- for k in kwargs.keys():
- if re_net.match(k):
- mac[k] = parse_mac(vm[k])
- elif re_dev.match(k):
- devices[k] = parse_dev(vm[k])
-
- results['mac'] = mac
- results['devices'] = devices
- results['vmid'] = int(vmid)
-
- def settings(self, vmid, node, **kwargs):
- proxmox_node = self.proxmox_api.nodes(node)
-
- # Sanitize kwargs. Remove not defined args and ensure True and False converted to int.
- kwargs = {k: v for k, v in kwargs.items() if v is not None}
-
- return proxmox_node.qemu(vmid).config.set(**kwargs) is None
-
- def wait_for_task(self, node, taskid):
- timeout = self.module.params['timeout']
- if self.module.params['state'] == 'stopped':
- # Increase task timeout in case of stopped state to be sure it waits longer than VM stop operation itself
- timeout += 10
-
- while timeout:
- if self.api_task_ok(node, taskid):
- # Wait an extra second as the API can be a ahead of the hypervisor
- time.sleep(1)
- return True
- timeout = timeout - 1
- if timeout == 0:
- break
- time.sleep(1)
- return False
-
- def create_vm(self, vmid, newid, node, name, memory, cpu, cores, sockets, update, update_unsafe, **kwargs):
- # Available only in PVE 4
- only_v4 = ['force', 'protection', 'skiplock']
- only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags']
- only_v8 = ['ciupgrade']
-
- # valid clone parameters
- valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
- clone_params = {}
- # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
- vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid)
-
- proxmox_node = self.proxmox_api.nodes(node)
-
- # Sanitize kwargs. Remove not defined args and ensure True and False converted to int.
- kwargs = {k: v for k, v in kwargs.items() if v is not None}
- kwargs.update({k: int(v) for k, v in kwargs.items() if isinstance(v, bool)})
-
- version = self.version()
- pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0]
-
- # The features work only on PVE 4+
- if pve_major_version < 4:
- for p in only_v4:
- if p in kwargs:
- del kwargs[p]
-
- # The features work only on PVE 6
- if pve_major_version < 6:
- for p in only_v6:
- if p in kwargs:
- del kwargs[p]
-
- # The features work only on PVE 8
- if pve_major_version < 8:
- for p in only_v8:
- if p in kwargs:
- del kwargs[p]
-
- # 'sshkeys' param expects an urlencoded string
- if 'sshkeys' in kwargs:
- urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='')
- kwargs['sshkeys'] = str(urlencoded_ssh_keys)
-
- # If update, don't update disk (virtio, efidisk0, tpmstate0, ide, sata, scsi) and network interface, unless update_unsafe=True
- # pool parameter not supported by qemu//config endpoint on "update" (PVE 6.2) - only with "create"
- if update:
- if update_unsafe is False:
- if 'virtio' in kwargs:
- del kwargs['virtio']
- if 'sata' in kwargs:
- del kwargs['sata']
- if 'scsi' in kwargs:
- del kwargs['scsi']
- if 'ide' in kwargs:
- del kwargs['ide']
- if 'efidisk0' in kwargs:
- del kwargs['efidisk0']
- if 'tpmstate0' in kwargs:
- del kwargs['tpmstate0']
- if 'net' in kwargs:
- del kwargs['net']
- if 'force' in kwargs:
- del kwargs['force']
- if 'pool' in kwargs:
- del kwargs['pool']
-
- # Check that the bios option is set to ovmf if the efidisk0 option is present
- if 'efidisk0' in kwargs:
- if ('bios' not in kwargs) or ('ovmf' != kwargs['bios']):
- self.module.fail_json(msg='efidisk0 cannot be used if bios is not set to ovmf. ')
-
- # Flatten efidisk0 option to a string so that it is a string which is what Proxmoxer and the API expect
- if 'efidisk0' in kwargs:
- efidisk0_str = ''
- # Regexp to catch underscores in keys name, to replace them after by hyphens
- hyphen_re = re.compile(r'_')
- # If present, the storage definition should be the first argument
- if 'storage' in kwargs['efidisk0']:
- efidisk0_str += kwargs['efidisk0'].get('storage') + ':1,'
- kwargs['efidisk0'].pop('storage')
- # Join other elements from the dict as key=value using commas as separator, replacing any underscore in key
- # by hyphens (needed for pre_enrolled_keys to pre-enrolled-keys)
- efidisk0_str += ','.join([hyphen_re.sub('-', k) + "=" + str(v) for k, v in kwargs['efidisk0'].items()
- if 'storage' != k])
- kwargs['efidisk0'] = efidisk0_str
-
- # Flatten tpmstate0 option to a string so that it is a string which is what Proxmoxer and the API expect
- if 'tpmstate0' in kwargs:
- kwargs['tpmstate0'] = '{storage}:1,version=v{version}'.format(
- storage=kwargs['tpmstate0'].get('storage'),
- version=kwargs['tpmstate0'].get('version')
- )
-
- # Convert all dict in kwargs to elements.
- # For audio[n], hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n], usb[n]
- for k in list(kwargs.keys()):
- if isinstance(kwargs[k], dict):
- kwargs.update(kwargs[k])
- del kwargs[k]
-
- if 'agent' in kwargs:
- try:
- # The API also allows booleans instead of e.g. `enabled=1` for backward-compatibility.
- kwargs['agent'] = int(boolean(kwargs['agent'], strict=True))
- except TypeError:
- # Not something that Ansible would parse as a boolean.
- pass
-
- # Rename numa_enabled to numa, according the API documentation
- if 'numa_enabled' in kwargs:
- kwargs['numa'] = kwargs['numa_enabled']
- del kwargs['numa_enabled']
-
- # PVE api expects strings for the following params
- if 'nameservers' in self.module.params:
- nameservers = self.module.params.pop('nameservers')
- if nameservers:
- kwargs['nameserver'] = ' '.join(nameservers)
- if 'searchdomains' in self.module.params:
- searchdomains = self.module.params.pop('searchdomains')
- if searchdomains:
- kwargs['searchdomain'] = ' '.join(searchdomains)
-
- # VM tags are expected to be valid and presented as a comma/semi-colon delimited string
- if 'tags' in kwargs:
- re_tag = re.compile(r'^[a-zA-Z0-9_][a-zA-Z0-9_\-\+\.]*$')
- for tag in kwargs['tags']:
- if not re_tag.match(tag):
- self.module.fail_json(msg='%s is not a valid tag' % tag)
- kwargs['tags'] = ",".join(kwargs['tags'])
-
- # -args and skiplock require root@pam user - but can not use api tokens
- if self.module.params['api_user'] == "root@pam" and self.module.params['args'] is not None:
- kwargs['args'] = self.module.params['args']
- elif self.module.params['api_user'] != "root@pam" and self.module.params['args'] is not None:
- self.module.fail_json(msg='args parameter require root@pam user. ')
-
- if self.module.params['api_user'] != "root@pam" and self.module.params['skiplock'] is not None:
- self.module.fail_json(msg='skiplock parameter require root@pam user. ')
-
- if update:
- if proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None:
- return True
- else:
- return False
- elif self.module.params['clone'] is not None:
- for param in valid_clone_params:
- if self.module.params[param] is not None:
- clone_params[param] = self.module.params[param]
- clone_params.update({k: int(v) for k, v in clone_params.items() if isinstance(v, bool)})
- taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params)
- else:
- taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
-
- if not self.wait_for_task(node, taskid):
- self.module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
- return False
- return True
-
- def start_vm(self, vm):
- vmid = vm['vmid']
- proxmox_node = self.proxmox_api.nodes(vm['node'])
- taskid = proxmox_node.qemu(vmid).status.start.post()
- if not self.wait_for_task(vm['node'], taskid):
- self.module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
- return False
- return True
-
- def stop_vm(self, vm, force, timeout):
- vmid = vm['vmid']
- proxmox_node = self.proxmox_api.nodes(vm['node'])
- taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0), timeout=timeout)
- if not self.wait_for_task(vm['node'], taskid):
- self.module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
- return False
- return True
-
- def restart_vm(self, vm, force, **status):
- vmid = vm['vmid']
- try:
- proxmox_node = self.proxmox_api.nodes(vm['node'])
- taskid = proxmox_node.qemu(vmid).status.reset.post() if force else proxmox_node.qemu(vmid).status.reboot.post()
- if not self.wait_for_task(vm['node'], taskid):
- self.module.fail_json(msg='Reached timeout while waiting for rebooting VM. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
- return False
- return True
- except Exception as e:
- self.module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e))
- return False
-
- def convert_to_template(self, vm, timeout, force):
- vmid = vm['vmid']
- try:
- proxmox_node = self.proxmox_api.nodes(vm['node'])
- if proxmox_node.qemu(vmid).status.current.get()['status'] == 'running' and force:
- self.stop_instance(vm, vmid, timeout, force)
- # not sure why, but templating a container doesn't return a taskid
- proxmox_node.qemu(vmid).template.post()
- return True
- except Exception as e:
- self.module.fail_json(vmid=vmid, msg="conversion of VM %s to template failed with exception: %s" % (vmid, e))
- return False
-
- def migrate_vm(self, vm, target_node):
- vmid = vm['vmid']
- proxmox_node = self.proxmox_api.nodes(vm['node'])
- taskid = proxmox_node.qemu(vmid).migrate.post(vmid=vmid, node=vm['node'], target=target_node, online=1)
- if not self.wait_for_task(vm['node'], taskid):
- self.module.fail_json(msg='Reached timeout while waiting for migrating VM. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
- return False
- return True
-
- def suspend_vm(self, vm, timeout, todisk):
- vmid = vm['vmid']
- proxmox_node = self.proxmox_api.nodes(vm['node'])
- taskid = proxmox_node.qemu(vmid).status.suspend.post(todisk=(1 if todisk else 0), timeout=timeout)
- if not self.wait_for_task(vm['node'], taskid):
- self.module.fail_json(msg='Reached timeout while waiting for suspending VM. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
- return False
- return True
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- kvm_args = dict(
- archive=dict(type='str'),
- acpi=dict(type='bool'),
- agent=dict(type='str'),
- args=dict(type='str'),
- audio=dict(type='dict'),
- autostart=dict(type='bool'),
- balloon=dict(type='int'),
- bios=dict(choices=['seabios', 'ovmf']),
- boot=dict(type='str'),
- bootdisk=dict(type='str'),
- cicustom=dict(type='str'),
- cipassword=dict(type='str', no_log=True),
- citype=dict(type='str', choices=['nocloud', 'configdrive2']),
- ciupgrade=dict(type='bool'),
- ciuser=dict(type='str'),
- clone=dict(type='str'),
- cores=dict(type='int'),
- cpu=dict(type='str'),
- cpulimit=dict(type='int'),
- cpuunits=dict(type='int'),
- delete=dict(type='str'),
- description=dict(type='str'),
- digest=dict(type='str'),
- efidisk0=dict(type='dict',
- options=dict(
- storage=dict(type='str'),
- format=dict(type='str'),
- efitype=dict(type='str', choices=['2m', '4m']),
- pre_enrolled_keys=dict(type='bool'),
- )),
- force=dict(type='bool'),
- format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']),
- freeze=dict(type='bool'),
- full=dict(type='bool', default=True),
- hookscript=dict(type='str'),
- hostpci=dict(type='dict'),
- hotplug=dict(type='str'),
- hugepages=dict(choices=['any', '2', '1024']),
- ide=dict(type='dict'),
- ipconfig=dict(type='dict'),
- keyboard=dict(type='str'),
- kvm=dict(type='bool'),
- localtime=dict(type='bool'),
- lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']),
- machine=dict(type='str'),
- memory=dict(type='int'),
- migrate=dict(type='bool', default=False),
- migrate_downtime=dict(type='int'),
- migrate_speed=dict(type='int'),
- name=dict(type='str'),
- nameservers=dict(type='list', elements='str'),
- net=dict(type='dict'),
- newid=dict(type='int'),
- node=dict(),
- numa=dict(type='dict'),
- numa_enabled=dict(type='bool'),
- onboot=dict(type='bool'),
- ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris']),
- parallel=dict(type='dict'),
- pool=dict(type='str'),
- protection=dict(type='bool'),
- reboot=dict(type='bool'),
- revert=dict(type='str'),
- sata=dict(type='dict'),
- scsi=dict(type='dict'),
- scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']),
- serial=dict(type='dict'),
- searchdomains=dict(type='list', elements='str'),
- shares=dict(type='int'),
- skiplock=dict(type='bool'),
- smbios=dict(type='str'),
- snapname=dict(type='str'),
- sockets=dict(type='int'),
- sshkeys=dict(type='str', no_log=False),
- startdate=dict(type='str'),
- startup=dict(),
- state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current', 'template', 'paused', 'hibernated']),
- storage=dict(type='str'),
- tablet=dict(type='bool'),
- tags=dict(type='list', elements='str'),
- target=dict(type='str'),
- tdf=dict(type='bool'),
- template=dict(type='bool'),
- timeout=dict(type='int', default=30),
- tpmstate0=dict(type='dict',
- options=dict(
- storage=dict(type='str', required=True),
- version=dict(type='str', choices=['2.0', '1.2'], default='2.0')
- )),
- usb=dict(type='dict'),
- update=dict(type='bool', default=False),
- update_unsafe=dict(type='bool', default=False),
- vcpus=dict(type='int'),
- vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
- virtio=dict(type='dict'),
- vmid=dict(type='int'),
- watchdog=dict(),
- )
- module_args.update(kvm_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
- required_together=[('api_token_id', 'api_token_secret')],
- required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
- required_if=[('state', 'present', ['node'])],
- )
-
- clone = module.params['clone']
- cpu = module.params['cpu']
- cores = module.params['cores']
- delete = module.params['delete']
- migrate = module.params['migrate']
- memory = module.params['memory']
- name = module.params['name']
- newid = module.params['newid']
- node = module.params['node']
- revert = module.params['revert']
- sockets = module.params['sockets']
- state = module.params['state']
- update = bool(module.params['update'])
- update_unsafe = bool(module.params['update_unsafe'])
- vmid = module.params['vmid']
- validate_certs = module.params['validate_certs']
-
- if module.params['format'] == 'unspecified':
- module.params['format'] = None
-
- proxmox = ProxmoxKvmAnsible(module)
-
- # If vmid is not defined then retrieve its value from the vm name,
- # the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
- if not vmid:
- if state == 'present' and not update and not clone and not delete and not revert and not migrate:
- existing_vmid = proxmox.get_vmid(name, ignore_missing=True)
- if existing_vmid:
- vmid = existing_vmid
- else:
- try:
- vmid = proxmox.get_nextvmid()
- except Exception:
- module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
- else:
- clone_target = clone or name
- vmid = proxmox.get_vmid(clone_target, ignore_missing=True)
-
- if clone is not None:
- # If newid is not defined then retrieve the next free id from ProxmoxAPI
- if not newid:
- try:
- newid = proxmox.get_nextvmid()
- except Exception:
- module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
-
- # Ensure source VM name exists when cloning
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % clone)
-
- # Ensure source VM id exists when cloning
- proxmox.get_vm(vmid)
-
- # Ensure the chosen VM name doesn't already exist when cloning
- existing_vmid = proxmox.get_vmid(name, ignore_missing=True)
- if existing_vmid:
- module.exit_json(changed=False, vmid=existing_vmid, msg="VM with name <%s> already exists" % name)
-
- # Ensure the chosen VM id doesn't already exist when cloning
- if proxmox.get_vm(newid, ignore_missing=True):
- module.exit_json(changed=False, vmid=vmid, msg="vmid %s with VM name %s already exists" % (newid, name))
-
- if delete is not None:
- try:
- proxmox.settings(vmid, node, delete=delete)
- module.exit_json(changed=True, vmid=vmid, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid))
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
-
- if revert is not None:
- try:
- proxmox.settings(vmid, node, revert=revert)
- module.exit_json(changed=True, vmid=vmid, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid))
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe is not a pending task... '.format(name, vmid) + str(e))
-
- if migrate:
- try:
- vm = proxmox.get_vm(vmid)
- vm_node = vm['node']
- if node != vm_node:
- proxmox.migrate_vm(vm, node)
- module.exit_json(changed=True, vmid=vmid, msg="VM {0} has been migrated from {1} to {2}".format(vmid, vm_node, node))
- else:
- module.exit_json(changed=False, vmid=vmid, msg="VM {0} is already on {1}".format(vmid, node))
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to migrate VM {0} from {1} to {2}: {3}'.format(vmid, vm_node, node, e))
-
- if state == 'present':
- if not (update or clone) and proxmox.get_vm(vmid, ignore_missing=True):
- module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid)
- elif not (update or clone or vmid) and proxmox.get_vmid(name, ignore_missing=True):
- module.exit_json(changed=False, vmid=proxmox.get_vmid(name), msg="VM with name <%s> already exists" % name)
- elif not node:
- module.fail_json(msg='node is mandatory for creating/updating VM')
- elif update and not any([vmid, name]):
- module.fail_json(msg='vmid or name is mandatory for updating VM')
- elif not proxmox.get_node(node):
- module.fail_json(msg="node '%s' does not exist in cluster" % node)
-
- try:
- proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update, update_unsafe,
- archive=module.params['archive'],
- acpi=module.params['acpi'],
- agent=module.params['agent'],
- audio=module.params['audio'],
- autostart=module.params['autostart'],
- balloon=module.params['balloon'],
- bios=module.params['bios'],
- boot=module.params['boot'],
- bootdisk=module.params['bootdisk'],
- cicustom=module.params['cicustom'],
- cipassword=module.params['cipassword'],
- citype=module.params['citype'],
- ciupgrade=module.params['ciupgrade'],
- ciuser=module.params['ciuser'],
- cpulimit=module.params['cpulimit'],
- cpuunits=module.params['cpuunits'],
- description=module.params['description'],
- digest=module.params['digest'],
- efidisk0=module.params['efidisk0'],
- force=module.params['force'],
- freeze=module.params['freeze'],
- hookscript=module.params['hookscript'],
- hostpci=module.params['hostpci'],
- hotplug=module.params['hotplug'],
- hugepages=module.params['hugepages'],
- ide=module.params['ide'],
- ipconfig=module.params['ipconfig'],
- keyboard=module.params['keyboard'],
- kvm=module.params['kvm'],
- localtime=module.params['localtime'],
- lock=module.params['lock'],
- machine=module.params['machine'],
- migrate_downtime=module.params['migrate_downtime'],
- migrate_speed=module.params['migrate_speed'],
- net=module.params['net'],
- numa=module.params['numa'],
- numa_enabled=module.params['numa_enabled'],
- onboot=module.params['onboot'],
- ostype=module.params['ostype'],
- parallel=module.params['parallel'],
- pool=module.params['pool'],
- protection=module.params['protection'],
- reboot=module.params['reboot'],
- sata=module.params['sata'],
- scsi=module.params['scsi'],
- scsihw=module.params['scsihw'],
- serial=module.params['serial'],
- shares=module.params['shares'],
- skiplock=module.params['skiplock'],
- smbios1=module.params['smbios'],
- snapname=module.params['snapname'],
- sshkeys=module.params['sshkeys'],
- startdate=module.params['startdate'],
- startup=module.params['startup'],
- tablet=module.params['tablet'],
- tags=module.params['tags'],
- target=module.params['target'],
- tdf=module.params['tdf'],
- template=module.params['template'],
- tpmstate0=module.params['tpmstate0'],
- usb=module.params['usb'],
- vcpus=module.params['vcpus'],
- vga=module.params['vga'],
- virtio=module.params['virtio'],
- watchdog=module.params['watchdog'])
-
- if not clone:
- proxmox.get_vminfo(node, vmid,
- ide=module.params['ide'],
- net=module.params['net'],
- sata=module.params['sata'],
- scsi=module.params['scsi'],
- virtio=module.params['virtio'])
- except Exception as e:
- if update:
- module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
- elif clone is not None:
- module.fail_json(vmid=vmid, msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e))
- else:
- module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
-
- if update:
- module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid))
- elif clone is not None:
- module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
- else:
- module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
-
- elif state == 'started':
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
-
- status = {}
- try:
- vm = proxmox.get_vm(vmid)
- current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status']
- status['status'] = current
- if current == 'running':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status)
-
- if proxmox.start_vm(vm):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid, **status)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status)
-
- elif state == 'stopped':
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
-
- status = {}
- try:
- vm = proxmox.get_vm(vmid)
- current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status']
- status['status'] = current
- if current == 'stopped':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status)
-
- proxmox.stop_vm(vm, force=module.params['force'], timeout=module.params['timeout'])
- module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status)
-
- elif state == 'template':
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
-
- status = {}
- try:
- vm = proxmox.get_vm(vmid)
-
- if vm['template'] == 1:
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is already a template" % vmid, **status)
-
- if proxmox.convert_to_template(vm, force=module.params['force'], timeout=module.params['timeout']):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s is converting to template" % vmid, **status)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="conversion of VM %s to template failed with exception: %s" % (vmid, e), **status)
-
- elif state == 'restarted':
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
-
- status = {}
- vm = proxmox.get_vm(vmid)
- current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status']
- status['status'] = current
- if current == 'stopped':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status)
-
- if proxmox.restart_vm(vm, force=module.params['force']):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status)
-
- elif state == 'absent':
- status = {}
- if not vmid:
- module.exit_json(changed=False, msg='VM with name = %s is already absent' % name)
-
- try:
- vm = proxmox.get_vm(vmid, ignore_missing=True)
- if not vm:
- module.exit_json(changed=False, vmid=vmid)
-
- proxmox_node = proxmox.proxmox_api.nodes(vm['node'])
- current = proxmox_node.qemu(vmid).status.current.get()['status']
- status['status'] = current
- if current == 'running':
- if module.params['force']:
- proxmox.stop_vm(vm, True, timeout=module.params['timeout'])
- else:
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion or use force=true." % vmid)
- taskid = proxmox_node.qemu.delete(vmid)
- if not proxmox.wait_for_task(vm['node'], taskid):
- module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
- else:
- module.exit_json(changed=True, vmid=vmid, msg="VM %s removed" % vmid)
- except Exception as e:
- module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))
-
- elif state == 'current':
- status = {}
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
- vm = proxmox.get_vm(vmid)
- if not name:
- name = vm.get('name', '(unnamed)')
- current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status']
- status['status'] = current
- if status:
- module.exit_json(changed=False, vmid=vmid, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
-
- elif state in ['paused', 'hibernated']:
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
-
- status = {}
- try:
- vm = proxmox.get_vm(vmid)
- current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status']
- status['status'] = current
- if current != 'running':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status)
-
- proxmox.suspend_vm(vm, force=module.params['force'], timeout=module.params['timeout'], todisk=(state == 'hibernated'))
- module.exit_json(changed=True, vmid=vmid, msg="VM %s is suspending" % vmid, **status)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="suspending of VM %s failed with exception: %s" % (vmid, e), **status)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_nic.py b/plugins/modules/proxmox_nic.py
deleted file mode 100644
index bd119fe5cc..0000000000
--- a/plugins/modules/proxmox_nic.py
+++ /dev/null
@@ -1,313 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2021, Lammert Hellinga (@Kogelvis)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r"""
-module: proxmox_nic
-short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster
-version_added: 3.1.0
-description:
- - Allows you to create/update/delete a NIC on Qemu(KVM) Virtual Machines in a Proxmox VE cluster.
-author: "Lammert Hellinga (@Kogelvis) "
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
- action_group:
- version_added: 9.0.0
-options:
- bridge:
- description:
- - Add this interface to the specified bridge device. The Proxmox VE default bridge is called V(vmbr0).
- type: str
- firewall:
- description:
- - Whether this interface should be protected by the firewall.
- type: bool
- default: false
- interface:
- description:
- - Name of the interface, should be V(net[n]) where C(1 ≤ n ≤ 31).
- type: str
- required: true
- link_down:
- description:
- - Whether this interface should be disconnected (like pulling the plug).
- type: bool
- default: false
- mac:
- description:
- - V(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified.
- - When not specified this module will keep the MAC address the same when changing an existing interface.
- type: str
- model:
- description:
- - The NIC emulator model.
- type: str
- choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci',
- 'pcnet', 'rtl8139', 'virtio', 'vmxnet3']
- default: virtio
- mtu:
- description:
- - Force MTU, for C(virtio) model only, setting will be ignored otherwise.
- - Set to V(1) to use the bridge MTU.
- - Value should be C(1 ≤ n ≤ 65520).
- type: int
- name:
- description:
- - Specifies the VM name. Only used on the configuration web interface.
- - Required only for O(state=present).
- type: str
- queues:
- description:
- - Number of packet queues to be used on the device.
- - Value should be C(0 ≤ n ≤ 16).
- type: int
- rate:
- description:
- - Rate limit in MBps (MegaBytes per second) as floating point number.
- type: float
- state:
- description:
- - Indicates desired state of the NIC.
- type: str
- choices: ['present', 'absent']
- default: present
- tag:
- description:
- - VLAN tag to apply to packets on this interface.
- - Value should be C(1 ≤ n ≤ 4094).
- type: int
- trunks:
- description:
- - List of VLAN trunks to pass through this interface.
- type: list
- elements: int
- vmid:
- description:
- - Specifies the instance ID.
- type: int
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
-"""
-
-EXAMPLES = r"""
-- name: Create NIC net0 targeting the vm by name
- community.general.proxmox_nic:
- api_user: root@pam
- api_password: secret
- api_host: proxmoxhost
- name: my_vm
- interface: net0
- bridge: vmbr0
- tag: 3
-
-- name: Create NIC net0 targeting the vm by id
- community.general.proxmox_nic:
- api_user: root@pam
- api_password: secret
- api_host: proxmoxhost
- vmid: 103
- interface: net0
- bridge: vmbr0
- mac: "12:34:56:C0:FF:EE"
- firewall: true
-
-- name: Delete NIC net0 targeting the vm by name
- community.general.proxmox_nic:
- api_user: root@pam
- api_password: secret
- api_host: proxmoxhost
- name: my_vm
- interface: net0
- state: absent
-"""
-
-RETURN = r"""
-vmid:
- description: The VM vmid.
- returned: success
- type: int
- sample: 115
-msg:
- description: A short message.
- returned: always
- type: str
- sample: "Nic net0 unchanged on VM with vmid 103"
-"""
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxNicAnsible(ProxmoxAnsible):
- def update_nic(self, vmid, interface, model, **kwargs):
- vm = self.get_vm(vmid)
-
- try:
- vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get()
- except Exception as e:
- self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
-
- if interface in vminfo:
- # Convert the current config to a dictionary
- config = vminfo[interface].split(',')
- config.sort()
-
- config_current = {}
-
- for i in config:
- kv = i.split('=')
- try:
- config_current[kv[0]] = kv[1]
- except IndexError:
- config_current[kv[0]] = ''
-
- # determine the current model nic and mac-address
- models = ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b',
- 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', 'rtl8139', 'virtio', 'vmxnet3']
- current_model = set(models) & set(config_current.keys())
- current_model = current_model.pop()
- current_mac = config_current[current_model]
-
- # build nic config string
- config_provided = "{0}={1}".format(model, current_mac)
- else:
- config_provided = model
-
- if kwargs['mac']:
- config_provided = "{0}={1}".format(model, kwargs['mac'])
-
- if kwargs['bridge']:
- config_provided += ",bridge={0}".format(kwargs['bridge'])
-
- if kwargs['firewall']:
- config_provided += ",firewall=1"
-
- if kwargs['link_down']:
- config_provided += ',link_down=1'
-
- if kwargs['mtu']:
- config_provided += ",mtu={0}".format(kwargs['mtu'])
- if model != 'virtio':
- self.module.warn(
- 'Ignoring MTU for nic {0} on VM with vmid {1}, '
- 'model should be set to \'virtio\': '.format(interface, vmid))
-
- if kwargs['queues']:
- config_provided += ",queues={0}".format(kwargs['queues'])
-
- if kwargs['rate']:
- config_provided += ",rate={0}".format(kwargs['rate'])
-
- if kwargs['tag']:
- config_provided += ",tag={0}".format(kwargs['tag'])
-
- if kwargs['trunks']:
- config_provided += ",trunks={0}".format(';'.join(str(x) for x in kwargs['trunks']))
-
- net = {interface: config_provided}
- vm = self.get_vm(vmid)
-
- if ((interface not in vminfo) or (vminfo[interface] != config_provided)):
- if not self.module.check_mode:
- self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**net)
- return True
-
- return False
-
- def delete_nic(self, vmid, interface):
- vm = self.get_vm(vmid)
- vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get()
-
- if interface in vminfo:
- if not self.module.check_mode:
- self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(delete=interface)
- return True
-
- return False
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- nic_args = dict(
- bridge=dict(type='str'),
- firewall=dict(type='bool', default=False),
- interface=dict(type='str', required=True),
- link_down=dict(type='bool', default=False),
- mac=dict(type='str'),
- model=dict(choices=['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em',
- 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet',
- 'rtl8139', 'virtio', 'vmxnet3'], default='virtio'),
- mtu=dict(type='int'),
- name=dict(type='str'),
- queues=dict(type='int'),
- rate=dict(type='float'),
- state=dict(default='present', choices=['present', 'absent']),
- tag=dict(type='int'),
- trunks=dict(type='list', elements='int'),
- vmid=dict(type='int'),
- )
- module_args.update(nic_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_together=[('api_token_id', 'api_token_secret')],
- required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
- supports_check_mode=True,
- )
-
- proxmox = ProxmoxNicAnsible(module)
-
- interface = module.params['interface']
- model = module.params['model']
- name = module.params['name']
- state = module.params['state']
- vmid = module.params['vmid']
-
- # If vmid is not defined then retrieve its value from the vm name,
- if not vmid:
- vmid = proxmox.get_vmid(name)
-
- # Ensure VM id exists
- proxmox.get_vm(vmid)
-
- if state == 'present':
- try:
- if proxmox.update_nic(vmid, interface, model,
- bridge=module.params['bridge'],
- firewall=module.params['firewall'],
- link_down=module.params['link_down'],
- mac=module.params['mac'],
- mtu=module.params['mtu'],
- queues=module.params['queues'],
- rate=module.params['rate'],
- tag=module.params['tag'],
- trunks=module.params['trunks']):
- module.exit_json(changed=True, vmid=vmid, msg="Nic {0} updated on VM with vmid {1}".format(interface, vmid))
- else:
- module.exit_json(vmid=vmid, msg="Nic {0} unchanged on VM with vmid {1}".format(interface, vmid))
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to change nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e))
-
- elif state == 'absent':
- try:
- if proxmox.delete_nic(vmid, interface):
- module.exit_json(changed=True, vmid=vmid, msg="Nic {0} deleted on VM with vmid {1}".format(interface, vmid))
- else:
- module.exit_json(vmid=vmid, msg="Nic {0} does not exist on VM with vmid {1}".format(interface, vmid))
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to delete nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_node_info.py b/plugins/modules/proxmox_node_info.py
deleted file mode 100644
index e243862134..0000000000
--- a/plugins/modules/proxmox_node_info.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright John Berninger (@jberning)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: proxmox_node_info
-short_description: Retrieve information about one or more Proxmox VE nodes
-version_added: 8.2.0
-description:
- - Retrieve information about one or more Proxmox VE nodes.
-author: John Berninger (@jwbernin)
-attributes:
- action_group:
- version_added: 9.0.0
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
-"""
-
-
-EXAMPLES = r"""
-- name: List existing nodes
- community.general.proxmox_node_info:
- api_host: proxmox1
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- register: proxmox_nodes
-"""
-
-
-RETURN = r"""
-proxmox_nodes:
- description: List of Proxmox VE nodes.
- returned: always, but can be empty
- type: list
- elements: dict
- contains:
- cpu:
- description: Current CPU usage in fractional shares of this host's total available CPU.
- returned: on success
- type: float
- disk:
- description: Current local disk usage of this host.
- returned: on success
- type: int
- id:
- description: Identity of the node.
- returned: on success
- type: str
- level:
- description: Support level. Can be blank if not under a paid support contract.
- returned: on success
- type: str
- maxcpu:
- description: Total number of available CPUs on this host.
- returned: on success
- type: int
- maxdisk:
- description: Size of local disk in bytes.
- returned: on success
- type: int
- maxmem:
- description: Memory size in bytes.
- returned: on success
- type: int
- mem:
- description: Used memory in bytes.
- returned: on success
- type: int
- node:
- description: Short hostname of this node.
- returned: on success
- type: str
- ssl_fingerprint:
- description: SSL fingerprint of the node certificate.
- returned: on success
- type: str
- status:
- description: Node status.
- returned: on success
- type: str
- type:
- description: Object type being returned.
- returned: on success
- type: str
- uptime:
- description: Node uptime in seconds.
- returned: on success
- type: int
-"""
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxNodeInfoAnsible(ProxmoxAnsible):
- def get_nodes(self):
- nodes = self.proxmox_api.nodes.get()
- return nodes
-
-
-def proxmox_node_info_argument_spec():
- return dict()
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- node_info_args = proxmox_node_info_argument_spec()
- module_args.update(node_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[('api_password', 'api_token_id')],
- required_together=[('api_token_id', 'api_token_secret')],
- supports_check_mode=True,
- )
- result = dict(
- changed=False
- )
-
- proxmox = ProxmoxNodeInfoAnsible(module)
-
- nodes = proxmox.get_nodes()
- result['proxmox_nodes'] = nodes
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_pool.py b/plugins/modules/proxmox_pool.py
deleted file mode 100644
index c53e394eeb..0000000000
--- a/plugins/modules/proxmox_pool.py
+++ /dev/null
@@ -1,182 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2023, Sergei Antipov (UnderGreen)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r"""
-module: proxmox_pool
-short_description: Pool management for Proxmox VE cluster
-description:
- - Create or delete a pool for Proxmox VE clusters.
- - For pool members management please consult M(community.general.proxmox_pool_member) module.
-version_added: 7.1.0
-author: "Sergei Antipov (@UnderGreen) "
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
- action_group:
- version_added: 9.0.0
-options:
- poolid:
- description:
- - The pool ID.
- type: str
- aliases: ["name"]
- required: true
- state:
- description:
- - Indicate desired state of the pool.
- - The pool must be empty prior deleting it with O(state=absent).
- choices: ['present', 'absent']
- default: present
- type: str
- comment:
- description:
- - Specify the description for the pool.
- - Parameter is ignored when pool already exists or O(state=absent).
- type: str
-
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
-"""
-
-EXAMPLES = r"""
-- name: Create new Proxmox VE pool
- community.general.proxmox_pool:
- api_host: node1
- api_user: root@pam
- api_password: password
- poolid: test
- comment: 'New pool'
-
-- name: Delete the Proxmox VE pool
- community.general.proxmox_pool:
- api_host: node1
- api_user: root@pam
- api_password: password
- poolid: test
- state: absent
-"""
-
-RETURN = r"""
-poolid:
- description: The pool ID.
- returned: success
- type: str
- sample: test
-msg:
- description: A short message on what the module did.
- returned: always
- type: str
- sample: "Pool test successfully created"
-"""
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxPoolAnsible(ProxmoxAnsible):
-
- def is_pool_existing(self, poolid):
- """Check whether pool already exist
-
- :param poolid: str - name of the pool
- :return: bool - is pool exists?
- """
- try:
- pools = self.proxmox_api.pools.get()
- for pool in pools:
- if pool['poolid'] == poolid:
- return True
- return False
- except Exception as e:
- self.module.fail_json(msg="Unable to retrieve pools: {0}".format(e))
-
- def is_pool_empty(self, poolid):
- """Check whether pool has members
-
- :param poolid: str - name of the pool
- :return: bool - is pool empty?
- """
- return True if not self.get_pool(poolid)['members'] else False
-
- def create_pool(self, poolid, comment=None):
- """Create Proxmox VE pool
-
- :param poolid: str - name of the pool
- :param comment: str, optional - Description of a pool
- :return: None
- """
- if self.is_pool_existing(poolid):
- self.module.exit_json(changed=False, poolid=poolid, msg="Pool {0} already exists".format(poolid))
-
- if self.module.check_mode:
- return
-
- try:
- self.proxmox_api.pools.post(poolid=poolid, comment=comment)
- except Exception as e:
- self.module.fail_json(msg="Failed to create pool with ID {0}: {1}".format(poolid, e))
-
- def delete_pool(self, poolid):
- """Delete Proxmox VE pool
-
- :param poolid: str - name of the pool
- :return: None
- """
- if not self.is_pool_existing(poolid):
- self.module.exit_json(changed=False, poolid=poolid, msg="Pool {0} doesn't exist".format(poolid))
-
- if self.is_pool_empty(poolid):
- if self.module.check_mode:
- return
-
- try:
- self.proxmox_api.pools(poolid).delete()
- except Exception as e:
- self.module.fail_json(msg="Failed to delete pool with ID {0}: {1}".format(poolid, e))
- else:
- self.module.fail_json(msg="Can't delete pool {0} with members. Please remove members from pool first.".format(poolid))
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- pools_args = dict(
- poolid=dict(type="str", aliases=["name"], required=True),
- comment=dict(type="str"),
- state=dict(default="present", choices=["present", "absent"]),
- )
-
- module_args.update(pools_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_together=[("api_token_id", "api_token_secret")],
- required_one_of=[("api_password", "api_token_id")],
- supports_check_mode=True
- )
-
- poolid = module.params["poolid"]
- comment = module.params["comment"]
- state = module.params["state"]
-
- proxmox = ProxmoxPoolAnsible(module)
-
- if state == "present":
- proxmox.create_pool(poolid, comment)
- module.exit_json(changed=True, poolid=poolid, msg="Pool {0} successfully created".format(poolid))
- else:
- proxmox.delete_pool(poolid)
- module.exit_json(changed=True, poolid=poolid, msg="Pool {0} successfully deleted".format(poolid))
-
-
-if __name__ == "__main__":
- main()
diff --git a/plugins/modules/proxmox_pool_member.py b/plugins/modules/proxmox_pool_member.py
deleted file mode 100644
index bd32e94e42..0000000000
--- a/plugins/modules/proxmox_pool_member.py
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2023, Sergei Antipov (UnderGreen)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r"""
-module: proxmox_pool_member
-short_description: Add or delete members from Proxmox VE cluster pools
-description:
- - Create or delete a pool member in Proxmox VE clusters.
-version_added: 7.1.0
-author: "Sergei Antipov (@UnderGreen) "
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: full
- action_group:
- version_added: 9.0.0
-options:
- poolid:
- description:
- - The pool ID.
- type: str
- aliases: ["name"]
- required: true
- member:
- description:
- - Specify the member name.
- - For O(type=storage) it is a storage name.
- - For O(type=vm) either vmid or vm name could be used.
- type: str
- required: true
- type:
- description:
- - Member type to add/remove from the pool.
- choices: ["vm", "storage"]
- default: vm
- type: str
- state:
- description:
- - Indicate desired state of the pool member.
- choices: ['present', 'absent']
- default: present
- type: str
-
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
-"""
-
-EXAMPLES = r"""
-- name: Add new VM to Proxmox VE pool
- community.general.proxmox_pool_member:
- api_host: node1
- api_user: root@pam
- api_password: password
- poolid: test
- member: 101
-
-- name: Add new storage to Proxmox VE pool
- community.general.proxmox_pool_member:
- api_host: node1
- api_user: root@pam
- api_password: password
- poolid: test
- member: zfs-data
- type: storage
-
-- name: Remove VM from the Proxmox VE pool using VM name
- community.general.proxmox_pool_member:
- api_host: node1
- api_user: root@pam
- api_password: password
- poolid: test
- member: pxe.home.arpa
- state: absent
-
-- name: Remove storage from the Proxmox VE pool
- community.general.proxmox_pool_member:
- api_host: node1
- api_user: root@pam
- api_password: password
- poolid: test
- member: zfs-storage
- type: storage
- state: absent
-"""
-
-RETURN = r"""
-poolid:
- description: The pool ID.
- returned: success
- type: str
- sample: test
-member:
- description: Member name.
- returned: success
- type: str
- sample: 101
-msg:
- description: A short message on what the module did.
- returned: always
- type: str
- sample: "Member 101 deleted from the pool test"
-"""
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxPoolMemberAnsible(ProxmoxAnsible):
-
- def pool_members(self, poolid):
- vms = []
- storage = []
- for member in self.get_pool(poolid)["members"]:
- if member["type"] == "storage":
- storage.append(member["storage"])
- else:
- vms.append(member["vmid"])
-
- return (vms, storage)
-
- def add_pool_member(self, poolid, member, member_type):
- current_vms_members, current_storage_members = self.pool_members(poolid)
- all_members_before = current_storage_members + current_vms_members
- all_members_after = all_members_before.copy()
- diff = {"before": {"members": all_members_before}, "after": {"members": all_members_after}}
-
- try:
- if member_type == "storage":
- storages = self.get_storages(type=None)
- if member not in [storage["storage"] for storage in storages]:
- self.module.fail_json(msg="Storage {0} doesn't exist in the cluster".format(member))
- if member in current_storage_members:
- self.module.exit_json(changed=False, poolid=poolid, member=member,
- diff=diff, msg="Member {0} is already part of the pool {1}".format(member, poolid))
-
- all_members_after.append(member)
- if self.module.check_mode:
- return diff
-
- self.proxmox_api.pools(poolid).put(storage=[member])
- return diff
- else:
- try:
- vmid = int(member)
- except ValueError:
- vmid = self.get_vmid(member)
-
- if vmid in current_vms_members:
- self.module.exit_json(changed=False, poolid=poolid, member=member,
- diff=diff, msg="VM {0} is already part of the pool {1}".format(member, poolid))
-
- all_members_after.append(member)
-
- if not self.module.check_mode:
- self.proxmox_api.pools(poolid).put(vms=[vmid])
- return diff
- except Exception as e:
- self.module.fail_json(msg="Failed to add a new member ({0}) to the pool {1}: {2}".format(member, poolid, e))
-
- def delete_pool_member(self, poolid, member, member_type):
- current_vms_members, current_storage_members = self.pool_members(poolid)
- all_members_before = current_storage_members + current_vms_members
- all_members_after = all_members_before.copy()
- diff = {"before": {"members": all_members_before}, "after": {"members": all_members_after}}
-
- try:
- if member_type == "storage":
- if member not in current_storage_members:
- self.module.exit_json(changed=False, poolid=poolid, member=member,
- diff=diff, msg="Member {0} is not part of the pool {1}".format(member, poolid))
-
- all_members_after.remove(member)
- if self.module.check_mode:
- return diff
-
- self.proxmox_api.pools(poolid).put(storage=[member], delete=1)
- return diff
- else:
- try:
- vmid = int(member)
- except ValueError:
- vmid = self.get_vmid(member)
-
- if vmid not in current_vms_members:
- self.module.exit_json(changed=False, poolid=poolid, member=member,
- diff=diff, msg="VM {0} is not part of the pool {1}".format(member, poolid))
-
- all_members_after.remove(vmid)
-
- if not self.module.check_mode:
- self.proxmox_api.pools(poolid).put(vms=[vmid], delete=1)
- return diff
- except Exception as e:
- self.module.fail_json(msg="Failed to delete a member ({0}) from the pool {1}: {2}".format(member, poolid, e))
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- pool_members_args = dict(
- poolid=dict(type="str", aliases=["name"], required=True),
- member=dict(type="str", required=True),
- type=dict(default="vm", choices=["vm", "storage"]),
- state=dict(default="present", choices=["present", "absent"]),
- )
-
- module_args.update(pool_members_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_together=[("api_token_id", "api_token_secret")],
- required_one_of=[("api_password", "api_token_id")],
- supports_check_mode=True
- )
-
- poolid = module.params["poolid"]
- member = module.params["member"]
- member_type = module.params["type"]
- state = module.params["state"]
-
- proxmox = ProxmoxPoolMemberAnsible(module)
-
- if state == "present":
- diff = proxmox.add_pool_member(poolid, member, member_type)
- module.exit_json(changed=True, poolid=poolid, member=member, diff=diff, msg="New member {0} added to the pool {1}".format(member, poolid))
- else:
- diff = proxmox.delete_pool_member(poolid, member, member_type)
- module.exit_json(changed=True, poolid=poolid, member=member, diff=diff, msg="Member {0} deleted from the pool {1}".format(member, poolid))
-
-
-if __name__ == "__main__":
- main()
diff --git a/plugins/modules/proxmox_snap.py b/plugins/modules/proxmox_snap.py
deleted file mode 100644
index 158efe99ec..0000000000
--- a/plugins/modules/proxmox_snap.py
+++ /dev/null
@@ -1,395 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2020, Jeffrey van Pelt (@Thulium-Drake)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r"""
-module: proxmox_snap
-short_description: Snapshot management of instances in Proxmox VE cluster
-version_added: 2.0.0
-description:
- - Allows you to create/delete/restore snapshots from instances in Proxmox VE cluster.
- - Supports both KVM and LXC, OpenVZ has not been tested, as it is no longer supported on Proxmox VE.
-attributes:
- check_mode:
- support: full
- diff_mode:
- support: none
- action_group:
- version_added: 9.0.0
-options:
- hostname:
- description:
- - The instance name.
- type: str
- vmid:
- description:
- - The instance ID.
- - If not set, will be fetched from PromoxAPI based on the hostname.
- type: str
- state:
- description:
- - Indicate desired state of the instance snapshot.
- - The V(rollback) value was added in community.general 4.8.0.
- choices: ['present', 'absent', 'rollback']
- default: present
- type: str
- force:
- description:
- - For removal from config file, even if removing disk snapshot fails.
- default: false
- type: bool
- unbind:
- description:
- - This option only applies to LXC containers.
- - Allows to snapshot a container even if it has configured mountpoints.
- - Temporarily disables all configured mountpoints, takes snapshot, and finally restores original configuration.
- - If running, the container will be stopped and restarted to apply config changes.
- - Due to restrictions in the Proxmox API this option can only be used authenticating as V(root@pam) with O(api_password),
- API tokens do not work either.
- - See U(https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config) (PUT tab) for more details.
- default: false
- type: bool
- version_added: 5.7.0
- vmstate:
- description:
- - Snapshot includes RAM.
- default: false
- type: bool
- description:
- description:
- - Specify the description for the snapshot. Only used on the configuration web interface.
- - This is saved as a comment inside the configuration file.
- type: str
- timeout:
- description:
- - Timeout for operations.
- default: 30
- type: int
- snapname:
- description:
- - Name of the snapshot that has to be created/deleted/restored.
- default: 'ansible_snap'
- type: str
- retention:
- description:
- - Remove old snapshots if there are more than O(retention) snapshots.
- - If O(retention) is set to V(0), all snapshots will be kept.
- - This is only used when O(state=present) and when an actual snapshot is created. If no snapshot is created, all existing
- snapshots will be kept.
- default: 0
- type: int
- version_added: 7.1.0
-
-notes:
- - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
-requirements: ["proxmoxer", "requests"]
-author: Jeffrey van Pelt (@Thulium-Drake)
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
-"""
-
-EXAMPLES = r"""
-- name: Create new container snapshot
- community.general.proxmox_snap:
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- vmid: 100
- state: present
- snapname: pre-updates
-
-- name: Create new container snapshot and keep only the 2 newest snapshots
- community.general.proxmox_snap:
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- vmid: 100
- state: present
- snapname: snapshot-42
- retention: 2
-
-- name: Create new snapshot for a container with configured mountpoints
- community.general.proxmox_snap:
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- vmid: 100
- state: present
- unbind: true # requires root@pam+password auth, API tokens are not supported
- snapname: pre-updates
-
-- name: Remove container snapshot
- community.general.proxmox_snap:
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- vmid: 100
- state: absent
- snapname: pre-updates
-
-- name: Rollback container snapshot
- community.general.proxmox_snap:
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- vmid: 100
- state: rollback
- snapname: pre-updates
-"""
-
-RETURN = r"""#"""
-
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxSnapAnsible(ProxmoxAnsible):
- def snapshot(self, vm, vmid):
- return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).snapshot
-
- def vmconfig(self, vm, vmid):
- return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).config
-
- def vmstatus(self, vm, vmid):
- return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).status
-
- def _container_mp_get(self, vm, vmid):
- cfg = self.vmconfig(vm, vmid).get()
- mountpoints = {}
- for key, value in cfg.items():
- if key.startswith('mp'):
- mountpoints[key] = value
- return mountpoints
-
- def _container_mp_disable(self, vm, vmid, timeout, unbind, mountpoints, vmstatus):
- # shutdown container if running
- if vmstatus == 'running':
- self.shutdown_instance(vm, vmid, timeout)
- # delete all mountpoints configs
- self.vmconfig(vm, vmid).put(delete=' '.join(mountpoints))
-
- def _container_mp_restore(self, vm, vmid, timeout, unbind, mountpoints, vmstatus):
- # NOTE: requires auth as `root@pam`, API tokens are not supported
- # see https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config
- # restore original config
- self.vmconfig(vm, vmid).put(**mountpoints)
- # start container (if was running before snap)
- if vmstatus == 'running':
- self.start_instance(vm, vmid, timeout)
-
- def start_instance(self, vm, vmid, timeout):
- taskid = self.vmstatus(vm, vmid).start.post()
- while timeout:
- if self.api_task_ok(vm['node'], taskid):
- return True
- timeout -= 1
- if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for VM to start. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
- time.sleep(1)
- return False
-
- def shutdown_instance(self, vm, vmid, timeout):
- taskid = self.vmstatus(vm, vmid).shutdown.post()
- while timeout:
- if self.api_task_ok(vm['node'], taskid):
- return True
- timeout -= 1
- if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for VM to stop. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
- time.sleep(1)
- return False
-
- def snapshot_retention(self, vm, vmid, retention):
- # ignore the last snapshot, which is the current state
- snapshots = self.snapshot(vm, vmid).get()[:-1]
- if retention > 0 and len(snapshots) > retention:
- # sort by age, oldest first
- for snap in sorted(snapshots, key=lambda x: x['snaptime'])[:len(snapshots) - retention]:
- self.snapshot(vm, vmid)(snap['name']).delete()
-
- def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate, unbind, retention):
- if self.module.check_mode:
- return True
-
- if vm['type'] == 'lxc':
- if unbind is True:
- # check if credentials will work
- # WARN: it is crucial this check runs here!
- # The correct permissions are required only to reconfig mounts.
- # Not checking now would allow to remove the configuration BUT
- # fail later, leaving the container in a misconfigured state.
- if (
- self.module.params['api_user'] != 'root@pam'
- or not self.module.params['api_password']
- ):
- self.module.fail_json(msg='`unbind=True` requires authentication as `root@pam` with `api_password`, API tokens are not supported.')
- return False
- mountpoints = self._container_mp_get(vm, vmid)
- vmstatus = self.vmstatus(vm, vmid).current().get()['status']
- if mountpoints:
- self._container_mp_disable(vm, vmid, timeout, unbind, mountpoints, vmstatus)
- taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description)
- else:
- taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description, vmstate=int(vmstate))
-
- while timeout:
- if self.api_task_ok(vm['node'], taskid):
- break
- if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for creating VM snapshot. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- timeout -= 1
- if vm['type'] == 'lxc' and unbind is True and mountpoints:
- self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus)
-
- self.snapshot_retention(vm, vmid, retention)
- return timeout > 0
-
- def snapshot_remove(self, vm, vmid, timeout, snapname, force):
- if self.module.check_mode:
- return True
-
- taskid = self.snapshot(vm, vmid).delete(snapname, force=int(force))
- while timeout:
- if self.api_task_ok(vm['node'], taskid):
- return True
- if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for removing VM snapshot. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- timeout -= 1
- return False
-
- def snapshot_rollback(self, vm, vmid, timeout, snapname):
- if self.module.check_mode:
- return True
-
- taskid = self.snapshot(vm, vmid)(snapname).post("rollback")
- while timeout:
- if self.api_task_ok(vm['node'], taskid):
- return True
- if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for rolling back VM snapshot. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- timeout -= 1
- return False
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- snap_args = dict(
- vmid=dict(required=False),
- hostname=dict(),
- timeout=dict(type='int', default=30),
- state=dict(default='present', choices=['present', 'absent', 'rollback']),
- description=dict(type='str'),
- snapname=dict(type='str', default='ansible_snap'),
- force=dict(type='bool', default=False),
- unbind=dict(type='bool', default=False),
- vmstate=dict(type='bool', default=False),
- retention=dict(type='int', default=0),
- )
- module_args.update(snap_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- supports_check_mode=True
- )
-
- proxmox = ProxmoxSnapAnsible(module)
-
- state = module.params['state']
- vmid = module.params['vmid']
- hostname = module.params['hostname']
- description = module.params['description']
- snapname = module.params['snapname']
- timeout = module.params['timeout']
- force = module.params['force']
- unbind = module.params['unbind']
- vmstate = module.params['vmstate']
- retention = module.params['retention']
-
- # If hostname is set get the VM id from ProxmoxAPI
- if not vmid and hostname:
- vmid = proxmox.get_vmid(hostname)
- elif not vmid:
- module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
-
- vm = proxmox.get_vm(vmid)
-
- if state == 'present':
- try:
- for i in proxmox.snapshot(vm, vmid).get():
- if i['name'] == snapname:
- module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname)
-
- if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate, unbind, retention):
- if module.check_mode:
- module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname)
- else:
- module.exit_json(changed=True, msg="Snapshot %s created" % snapname)
-
- except Exception as e:
- module.fail_json(msg="Creating snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e)))
-
- elif state == 'absent':
- try:
- snap_exist = False
-
- for i in proxmox.snapshot(vm, vmid).get():
- if i['name'] == snapname:
- snap_exist = True
- continue
-
- if not snap_exist:
- module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname)
- else:
- if proxmox.snapshot_remove(vm, vmid, timeout, snapname, force):
- if module.check_mode:
- module.exit_json(changed=False, msg="Snapshot %s would be removed" % snapname)
- else:
- module.exit_json(changed=True, msg="Snapshot %s removed" % snapname)
-
- except Exception as e:
- module.fail_json(msg="Removing snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e)))
- elif state == 'rollback':
- try:
- snap_exist = False
-
- for i in proxmox.snapshot(vm, vmid).get():
- if i['name'] == snapname:
- snap_exist = True
- continue
-
- if not snap_exist:
- module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname)
- if proxmox.snapshot_rollback(vm, vmid, timeout, snapname):
- if module.check_mode:
- module.exit_json(changed=True, msg="Snapshot %s would be rolled back" % snapname)
- else:
- module.exit_json(changed=True, msg="Snapshot %s rolled back" % snapname)
-
- except Exception as e:
- module.fail_json(msg="Rollback of snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e)))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_storage_contents_info.py b/plugins/modules/proxmox_storage_contents_info.py
deleted file mode 100644
index e0e95565d7..0000000000
--- a/plugins/modules/proxmox_storage_contents_info.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Julian Vanden Broeck (@l00ptr)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: proxmox_storage_contents_info
-short_description: List content from a Proxmox VE storage
-version_added: 8.2.0
-description:
- - Retrieves information about stored objects on a specific storage attached to a node.
-attributes:
- action_group:
- version_added: 9.0.0
-options:
- storage:
- description:
- - Only return content stored on that specific storage.
- aliases: ['name']
- type: str
- required: true
- node:
- description:
- - Proxmox node to which the storage is attached.
- type: str
- required: true
- content:
- description:
- - Filter on a specific content type.
- type: str
- choices: ["all", "backup", "rootdir", "images", "iso"]
- default: "all"
- vmid:
- description:
- - Filter on a specific VMID.
- type: int
-author: Julian Vanden Broeck (@l00ptr)
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
-"""
-
-
-EXAMPLES = r"""
-- name: List existing storages
- community.general.proxmox_storage_contents_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- storage: lvm2
- content: backup
- vmid: 130
-"""
-
-
-RETURN = r"""
-proxmox_storage_content:
- description: Content of of storage attached to a node.
- type: list
- returned: success
- elements: dict
- contains:
- content:
- description: Proxmox content of listed objects on this storage.
- type: str
- returned: success
- ctime:
- description: Creation time of the listed objects.
- type: str
- returned: success
- format:
- description: Format of the listed objects (can be V(raw), V(pbs-vm), V(iso),...).
- type: str
- returned: success
- size:
- description: Size of the listed objects.
- type: int
- returned: success
- subtype:
- description: Subtype of the listed objects (can be V(qemu) or V(lxc)).
- type: str
- returned: When storage is dedicated to backup, typically on PBS storage.
- verification:
- description: Backup verification status of the listed objects.
- type: dict
- returned: When storage is dedicated to backup, typically on PBS storage.
- sample: {
- "state": "ok",
- "upid": "UPID:backup-srv:00130F49:1A12D8375:00001CD7:657A2258:verificationjob:daily\\x3av\\x2dd0cc18c5\\x2d8707:root@pam:"
- }
- volid:
- description: Volume identifier of the listed objects.
- type: str
- returned: success
-"""
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- ProxmoxAnsible, proxmox_auth_argument_spec)
-
-
-def proxmox_storage_info_argument_spec():
- return dict(
- storage=dict(type="str", required=True, aliases=["name"]),
- content=dict(type="str", required=False, default="all", choices=["all", "backup", "rootdir", "images", "iso"]),
- vmid=dict(type="int"),
- node=dict(required=True, type="str"),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- storage_info_args = proxmox_storage_info_argument_spec()
- module_args.update(storage_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[("api_password", "api_token_id")],
- required_together=[("api_token_id", "api_token_secret")],
- supports_check_mode=True,
- )
- result = dict(changed=False)
- proxmox = ProxmoxAnsible(module)
- res = proxmox.get_storage_content(
- node=module.params["node"],
- storage=module.params["storage"],
- content=None if module.params["content"] == "all" else module.params["content"],
- vmid=module.params["vmid"],
- )
- result["proxmox_storage_content"] = res
- module.exit_json(**result)
-
-
-if __name__ == "__main__":
- main()
diff --git a/plugins/modules/proxmox_storage_info.py b/plugins/modules/proxmox_storage_info.py
deleted file mode 100644
index 5b9b1b6aaa..0000000000
--- a/plugins/modules/proxmox_storage_info.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Tristan Le Guern (@tleguern)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: proxmox_storage_info
-short_description: Retrieve information about one or more Proxmox VE storages
-version_added: 2.2.0
-description:
- - Retrieve information about one or more Proxmox VE storages.
-attributes:
- action_group:
- version_added: 9.0.0
-options:
- storage:
- description:
- - Only return information on a specific storage.
- aliases: ['name']
- type: str
- type:
- description:
- - Filter on a specific storage type.
- type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
-notes:
- - Storage specific options can be returned by this module, please look at the documentation at U(https://pve.proxmox.com/wiki/Storage).
-"""
-
-
-EXAMPLES = r"""
-- name: List existing storages
- community.general.proxmox_storage_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- register: proxmox_storages
-
-- name: List NFS storages only
- community.general.proxmox_storage_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- type: nfs
- register: proxmox_storages_nfs
-
-- name: Retrieve information about the lvm2 storage
- community.general.proxmox_storage_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- storage: lvm2
- register: proxmox_storage_lvm
-"""
-
-
-RETURN = r"""
-proxmox_storages:
- description: List of storage pools.
- returned: on success
- type: list
- elements: dict
- contains:
- content:
- description: Proxmox content types available in this storage.
- returned: on success
- type: list
- elements: str
- digest:
- description: Storage's digest.
- returned: on success
- type: str
- nodes:
- description: List of nodes associated to this storage.
- returned: on success, if storage is not local
- type: list
- elements: str
- path:
- description: Physical path to this storage.
- returned: on success
- type: str
- prune-backups:
- description: Backup retention options.
- returned: on success
- type: list
- elements: dict
- shared:
- description: Is this storage shared.
- returned: on success
- type: bool
- storage:
- description: Storage name.
- returned: on success
- type: str
- type:
- description: Storage type.
- returned: on success
- type: str
-"""
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool)
-
-
-class ProxmoxStorageInfoAnsible(ProxmoxAnsible):
- def get_storage(self, storage):
- try:
- storage = self.proxmox_api.storage.get(storage)
- except Exception:
- self.module.fail_json(msg="Storage '%s' does not exist" % storage)
- return ProxmoxStorage(storage)
-
- def get_storages(self, type=None):
- storages = self.proxmox_api.storage.get(type=type)
- storages = [ProxmoxStorage(storage) for storage in storages]
- return storages
-
-
-class ProxmoxStorage:
- def __init__(self, storage):
- self.storage = storage
- # Convert proxmox representation of lists, dicts and boolean for easier
- # manipulation within ansible.
- if 'shared' in self.storage:
- self.storage['shared'] = proxmox_to_ansible_bool(self.storage['shared'])
- if 'content' in self.storage:
- self.storage['content'] = self.storage['content'].split(',')
- if 'nodes' in self.storage:
- self.storage['nodes'] = self.storage['nodes'].split(',')
- if 'prune-backups' in storage:
- options = storage['prune-backups'].split(',')
- self.storage['prune-backups'] = dict()
- for option in options:
- k, v = option.split('=')
- self.storage['prune-backups'][k] = v
-
-
-def proxmox_storage_info_argument_spec():
- return dict(
- storage=dict(type='str', aliases=['name']),
- type=dict(type='str'),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- storage_info_args = proxmox_storage_info_argument_spec()
- module_args.update(storage_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[('api_password', 'api_token_id')],
- required_together=[('api_token_id', 'api_token_secret')],
- mutually_exclusive=[('storage', 'type')],
- supports_check_mode=True
- )
- result = dict(
- changed=False
- )
-
- proxmox = ProxmoxStorageInfoAnsible(module)
- storage = module.params['storage']
- storagetype = module.params['type']
-
- if storage:
- storages = [proxmox.get_storage(storage)]
- else:
- storages = proxmox.get_storages(type=storagetype)
- result['proxmox_storages'] = [storage.storage for storage in storages]
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_tasks_info.py b/plugins/modules/proxmox_tasks_info.py
deleted file mode 100644
index 574a971427..0000000000
--- a/plugins/modules/proxmox_tasks_info.py
+++ /dev/null
@@ -1,188 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2021, Andreas Botzner (@paginabianca)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: proxmox_tasks_info
-short_description: Retrieve information about one or more Proxmox VE tasks
-version_added: 3.8.0
-description:
- - Retrieve information about one or more Proxmox VE tasks.
-author: 'Andreas Botzner (@paginabianca) '
-attributes:
- action_group:
- version_added: 9.0.0
-options:
- node:
- description:
- - Node where to get tasks.
- required: true
- type: str
- task:
- description:
- - Return specific task.
- aliases: ['upid', 'name']
- type: str
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
-"""
-
-
-EXAMPLES = r"""
-- name: List tasks on node01
- community.general.proxmox_tasks_info:
- api_host: proxmoxhost
- api_user: root@pam
- api_password: '{{ password | default(omit) }}'
- api_token_id: '{{ token_id | default(omit) }}'
- api_token_secret: '{{ token_secret | default(omit) }}'
- node: node01
- register: result
-
-- name: Retrieve information about specific tasks on node01
- community.general.proxmox_tasks_info:
- api_host: proxmoxhost
- api_user: root@pam
- api_password: '{{ password | default(omit) }}'
- api_token_id: '{{ token_id | default(omit) }}'
- api_token_secret: '{{ token_secret | default(omit) }}'
- task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'
- node: node01
- register: proxmox_tasks
-"""
-
-
-RETURN = r"""
-proxmox_tasks:
- description: List of tasks.
- returned: on success
- type: list
- elements: dict
- contains:
- id:
- description: ID of the task.
- returned: on success
- type: str
- node:
- description: Node name.
- returned: on success
- type: str
- pid:
- description: PID of the task.
- returned: on success
- type: int
- pstart:
- description: Pastart of the task.
- returned: on success
- type: int
- starttime:
- description: Starting time of the task.
- returned: on success
- type: int
- type:
- description: Type of the task.
- returned: on success
- type: str
- upid:
- description: UPID of the task.
- returned: on success
- type: str
- user:
- description: User that owns the task.
- returned: on success
- type: str
- endtime:
- description: Endtime of the task.
- returned: on success, can be absent
- type: int
- status:
- description: Status of the task.
- returned: on success, can be absent
- type: str
- failed:
- description: If the task failed.
- returned: when status is defined
- type: bool
-msg:
- description: Short message.
- returned: on failure
- type: str
- sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode'
-"""
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxTaskInfoAnsible(ProxmoxAnsible):
- def get_task(self, upid, node):
- tasks = self.get_tasks(node)
- for task in tasks:
- if task.info['upid'] == upid:
- return [task]
-
- def get_tasks(self, node):
- tasks = self.proxmox_api.nodes(node).tasks.get()
- return [ProxmoxTask(task) for task in tasks]
-
-
-class ProxmoxTask:
- def __init__(self, task):
- self.info = dict()
- for k, v in task.items():
- if k == 'status' and isinstance(v, str):
- self.info[k] = v
- if v != 'OK':
- self.info['failed'] = True
- else:
- self.info[k] = v
-
-
-def proxmox_task_info_argument_spec():
- return dict(
- task=dict(type='str', aliases=['upid', 'name'], required=False),
- node=dict(type='str', required=True),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- task_info_args = proxmox_task_info_argument_spec()
- module_args.update(task_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_together=[('api_token_id', 'api_token_secret')],
- required_one_of=[('api_password', 'api_token_id')],
- supports_check_mode=True)
- result = dict(changed=False)
-
- proxmox = ProxmoxTaskInfoAnsible(module)
- upid = module.params['task']
- node = module.params['node']
- if upid:
- tasks = proxmox.get_task(upid=upid, node=node)
- else:
- tasks = proxmox.get_tasks(node=node)
- if tasks is not None:
- result['proxmox_tasks'] = [task.info for task in tasks]
- module.exit_json(**result)
- else:
- result['msg'] = 'Task: {0} does not exist on node: {1}.'.format(
- upid, node)
- module.fail_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_template.py b/plugins/modules/proxmox_template.py
deleted file mode 100644
index 0081171878..0000000000
--- a/plugins/modules/proxmox_template.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: proxmox_template
-short_description: Management of OS templates in Proxmox VE cluster
-description:
- - Allows you to upload/delete templates in Proxmox VE cluster.
-attributes:
- check_mode:
- support: none
- diff_mode:
- support: none
- action_group:
- version_added: 9.0.0
-options:
- node:
- description:
- - Proxmox VE node on which to operate.
- type: str
- src:
- description:
- - Path to uploaded file.
- - Exactly one of O(src) or O(url) is required for O(state=present).
- type: path
- url:
- description:
- - URL to file to download.
- - Exactly one of O(src) or O(url) is required for O(state=present).
- type: str
- version_added: 10.1.0
- template:
- description:
- - The template name.
- - Required for O(state=absent) to delete a template.
- - Required for O(state=present) to download an appliance container template (pveam).
- type: str
- content_type:
- description:
- - Content type.
- - Required only for O(state=present).
- type: str
- default: 'vztmpl'
- choices: ['vztmpl', 'iso']
- storage:
- description:
- - Target storage.
- type: str
- default: 'local'
- timeout:
- description:
- - Timeout for operations.
- type: int
- default: 30
- force:
- description:
- - It can only be used with O(state=present), existing template will be overwritten.
- type: bool
- default: false
- state:
- description:
- - Indicate desired state of the template.
- type: str
- choices: ['present', 'absent']
- default: present
- checksum_algorithm:
- description:
- - Algorithm used to verify the checksum.
- - If specified, O(checksum) must also be specified.
- type: str
- choices: ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
- version_added: 10.3.0
- checksum:
- description:
- - The checksum to validate against.
- - Checksums are often provided by software distributors to verify that a download is not corrupted.
- - Checksums can usually be found on the distributors download page in the form of a file or string.
- - If specified, O(checksum_algorithm) must also be specified.
- type: str
- version_added: 10.3.0
-notes:
- - Requires C(proxmoxer) and C(requests) modules on host. Those modules can be installed with M(ansible.builtin.pip).
- - C(proxmoxer) >= 1.2.0 requires C(requests_toolbelt) to upload files larger than 256 MB.
-author: Sergei Antipov (@UnderGreen)
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
-"""
-
-EXAMPLES = r"""
----
-- name: Upload new openvz template with minimal options
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- src: ~/ubuntu-14.04-x86_64.tar.gz
-
-- name: Pull new openvz template with minimal options
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- url: https://ubuntu-mirror/ubuntu-14.04-x86_64.tar.gz
-
-- name: >
- Upload new openvz template with minimal options use environment
- PROXMOX_PASSWORD variable(you should export it before)
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_host: node1
- src: ~/ubuntu-14.04-x86_64.tar.gz
-
-- name: Upload new openvz template with all options and force overwrite
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- storage: local
- content_type: vztmpl
- src: ~/ubuntu-14.04-x86_64.tar.gz
- force: true
-
-- name: Pull new openvz template with all options and force overwrite
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- storage: local
- content_type: vztmpl
- url: https://ubuntu-mirror/ubuntu-14.04-x86_64.tar.gz
- force: true
-
-- name: Delete template with minimal options
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- template: ubuntu-14.04-x86_64.tar.gz
- state: absent
-
-- name: Download proxmox appliance container template
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- storage: local
- content_type: vztmpl
- template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz
-
-- name: Download and verify a template's checksum
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- url: ubuntu-20.04-standard_20.04-1_amd64.tar.gz
- checksum_algorithm: sha256
- checksum: 65d860160bdc9b98abf72407e14ca40b609417de7939897d3b58d55787aaef69
-"""
-
-import os
-import time
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode
-
-REQUESTS_TOOLBELT_ERR = None
-try:
- # requests_toolbelt is used internally by proxmoxer module
- import requests_toolbelt # noqa: F401, pylint: disable=unused-import
- HAS_REQUESTS_TOOLBELT = True
-except ImportError:
- HAS_REQUESTS_TOOLBELT = False
- REQUESTS_TOOLBELT_ERR = traceback.format_exc()
-
-
-class ProxmoxTemplateAnsible(ProxmoxAnsible):
- def has_template(self, node, storage, content_type, template):
- volid = '%s:%s/%s' % (storage, content_type, template)
- try:
- return any(tmpl['volid'] == volid for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get())
- except Exception as e:
- self.module.fail_json(msg="Failed to retrieve template '%s': %s" % (volid, e))
-
- def task_status(self, node, taskid, timeout):
- """
- Check the task status and wait until the task is completed or the timeout is reached.
- """
- while timeout:
- if self.api_task_ok(node, taskid):
- return True
- elif self.api_task_failed(node, taskid):
- self.module.fail_json(msg="Task error: %s" % self.proxmox_api.nodes(node).tasks(taskid).status.get()['exitstatus'])
- timeout = timeout - 1
- if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for uploading/downloading template. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(node).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
-
- def upload_template(self, node, storage, content_type, realpath, timeout):
- stats = os.stat(realpath)
- if (LooseVersion(self.proxmoxer_version) >= LooseVersion('1.2.0') and
- stats.st_size > 268435456 and not HAS_REQUESTS_TOOLBELT):
- self.module.fail_json(msg="'requests_toolbelt' module is required to upload files larger than 256MB",
- exception=missing_required_lib('requests_toolbelt'))
-
- try:
- taskid = self.proxmox_api.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
- return self.task_status(node, taskid, timeout)
- except Exception as e:
- self.module.fail_json(msg="Uploading template %s failed with error: %s" % (realpath, e))
-
- def fetch_template(self, node, storage, content_type, url, timeout):
- """Fetch a template from a web url source using the proxmox download-url endpoint
- """
- try:
- taskid = self.proxmox_api.nodes(node).storage(storage)("download-url").post(
- url=url, content=content_type, filename=os.path.basename(url)
- )
- return self.task_status(node, taskid, timeout)
- except Exception as e:
- self.module.fail_json(msg="Fetching template from url %s failed with error: %s" % (url, e))
-
- def download_template(self, node, storage, template, timeout):
- try:
- taskid = self.proxmox_api.nodes(node).aplinfo.post(storage=storage, template=template)
- return self.task_status(node, taskid, timeout)
- except Exception as e:
- self.module.fail_json(msg="Downloading template %s failed with error: %s" % (template, e))
-
- def delete_template(self, node, storage, content_type, template, timeout):
- volid = '%s:%s/%s' % (storage, content_type, template)
- self.proxmox_api.nodes(node).storage(storage).content.delete(volid)
- while timeout:
- if not self.has_template(node, storage, content_type, template):
- return True
- timeout = timeout - 1
- if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for deleting template.')
-
- time.sleep(1)
- return False
-
- def fetch_and_verify(self, node, storage, url, content_type, timeout, checksum, checksum_algorithm):
- """ Fetch a template from a web url, then verify it using a checksum.
- """
- data = {
- 'url': url,
- 'content': content_type,
- 'filename': os.path.basename(url),
- 'checksum': checksum,
- 'checksum-algorithm': checksum_algorithm}
- try:
- taskid = self.proxmox_api.nodes(node).storage(storage).post("download-url?{}".format(urlencode(data)))
- return self.task_status(node, taskid, timeout)
- except Exception as e:
- self.module.fail_json(msg="Checksum mismatch: %s" % (e))
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- template_args = dict(
- node=dict(),
- src=dict(type='path'),
- url=dict(),
- template=dict(),
- content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']),
- storage=dict(default='local'),
- timeout=dict(type='int', default=30),
- force=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent']),
- checksum_algorithm=dict(choices=['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']),
- checksum=dict(type='str'),
- )
- module_args.update(template_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_together=[('api_token_id', 'api_token_secret'), ('checksum', 'checksum_algorithm')],
- required_one_of=[('api_password', 'api_token_id')],
- required_if=[('state', 'absent', ['template'])],
- mutually_exclusive=[("src", "url")],
- )
-
- proxmox = ProxmoxTemplateAnsible(module)
-
- state = module.params['state']
- node = module.params['node']
- storage = module.params['storage']
- timeout = module.params['timeout']
- checksum = module.params['checksum']
- checksum_algorithm = module.params['checksum_algorithm']
-
- if state == 'present':
- content_type = module.params['content_type']
- src = module.params['src']
- url = module.params['url']
-
- # download appliance template
- if content_type == 'vztmpl' and not (src or url):
- template = module.params['template']
-
- if not template:
- module.fail_json(msg='template param for downloading appliance template is mandatory')
-
- if proxmox.has_template(node, storage, content_type, template) and not module.params['force']:
- module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
-
- if proxmox.download_template(node, storage, template, timeout):
- module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))
-
- if not src and not url:
- module.fail_json(msg='src or url param for uploading template file is mandatory')
- elif not url:
- template = os.path.basename(src)
- if proxmox.has_template(node, storage, content_type, template) and not module.params['force']:
- module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
- elif not (os.path.exists(src) and os.path.isfile(src)):
- module.fail_json(msg='template file on path %s not exists' % src)
-
- if proxmox.upload_template(node, storage, content_type, src, timeout):
- module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
- elif not src:
- template = os.path.basename(urlparse(url).path)
- if proxmox.has_template(node, storage, content_type, template):
- if not module.params['force']:
- module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
- elif not proxmox.delete_template(node, storage, content_type, template, timeout):
- module.fail_json(changed=False, msg='failed to delete template with volid=%s:%s/%s' % (storage, content_type, template))
-
- if checksum:
- if proxmox.fetch_and_verify(node, storage, url, content_type, timeout, checksum, checksum_algorithm):
- module.exit_json(changed=True, msg="Checksum verified, template with volid=%s:%s/%s uploaded" % (storage, content_type, template))
- if proxmox.fetch_template(node, storage, content_type, url, timeout):
- module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
-
- elif state == 'absent':
- try:
- content_type = module.params['content_type']
- template = module.params['template']
-
- if not proxmox.has_template(node, storage, content_type, template):
- module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
-
- if proxmox.delete_template(node, storage, content_type, template, timeout):
- module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
- except Exception as e:
- module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_user_info.py b/plugins/modules/proxmox_user_info.py
deleted file mode 100644
index a8da1ee30a..0000000000
--- a/plugins/modules/proxmox_user_info.py
+++ /dev/null
@@ -1,260 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright Tristan Le Guern
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r"""
-module: proxmox_user_info
-short_description: Retrieve information about one or more Proxmox VE users
-version_added: 1.3.0
-description:
- - Retrieve information about one or more Proxmox VE users.
-attributes:
- action_group:
- version_added: 9.0.0
-options:
- domain:
- description:
- - Restrict results to a specific authentication realm.
- aliases: ['realm']
- type: str
- user:
- description:
- - Restrict results to a specific user.
- aliases: ['name']
- type: str
- userid:
- description:
- - Restrict results to a specific user ID, which is a concatenation of a user and domain parts.
- type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
-"""
-
-EXAMPLES = r"""
-- name: List existing users
- community.general.proxmox_user_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- register: proxmox_users
-
-- name: List existing users in the pve authentication realm
- community.general.proxmox_user_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- domain: pve
- register: proxmox_users_pve
-
-- name: Retrieve information about admin@pve
- community.general.proxmox_user_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- userid: admin@pve
- register: proxmox_user_admin
-
-- name: Alternative way to retrieve information about admin@pve
- community.general.proxmox_user_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- user: admin
- domain: pve
- register: proxmox_user_admin
-"""
-
-
-RETURN = r"""
-proxmox_users:
- description: List of users.
- returned: always, but can be empty
- type: list
- elements: dict
- contains:
- comment:
- description: Short description of the user.
- returned: on success
- type: str
- domain:
- description: User's authentication realm, also the right part of the user ID.
- returned: on success
- type: str
- email:
- description: User's email address.
- returned: on success
- type: str
- enabled:
- description: User's account state.
- returned: on success
- type: bool
- expire:
- description: Expiration date in seconds since EPOCH. Zero means no expiration.
- returned: on success
- type: int
- firstname:
- description: User's first name.
- returned: on success
- type: str
- groups:
- description: List of groups which the user is a member of.
- returned: on success
- type: list
- elements: str
- keys:
- description: User's two factor authentication keys.
- returned: on success
- type: str
- lastname:
- description: User's last name.
- returned: on success
- type: str
- tokens:
- description: List of API tokens associated to the user.
- returned: on success
- type: list
- elements: dict
- contains:
- comment:
- description: Short description of the token.
- returned: on success
- type: str
- expire:
- description: Expiration date in seconds since EPOCH. Zero means no expiration.
- returned: on success
- type: int
- privsep:
- description: Describe if the API token is further restricted with ACLs or is fully privileged.
- returned: on success
- type: bool
- tokenid:
- description: Token name.
- returned: on success
- type: str
- user:
- description: User's login name, also the left part of the user ID.
- returned: on success
- type: str
- userid:
- description: Proxmox user ID, represented as user@realm.
- returned: on success
- type: str
-"""
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool)
-
-
-class ProxmoxUserInfoAnsible(ProxmoxAnsible):
- def get_user(self, userid):
- try:
- user = self.proxmox_api.access.users.get(userid)
- except Exception:
- self.module.fail_json(msg="User '%s' does not exist" % userid)
- user['userid'] = userid
- return ProxmoxUser(user)
-
- def get_users(self, domain=None):
- users = self.proxmox_api.access.users.get(full=1)
- users = [ProxmoxUser(user) for user in users]
- if domain:
- return [user for user in users if user.user['domain'] == domain]
- return users
-
-
-class ProxmoxUser:
- def __init__(self, user):
- self.user = dict()
- # Data representation is not the same depending on API calls
- for k, v in user.items():
- if k == 'enable':
- self.user['enabled'] = proxmox_to_ansible_bool(user['enable'])
- elif k == 'userid':
- self.user['user'] = user['userid'].split('@')[0]
- self.user['domain'] = user['userid'].split('@')[1]
- self.user[k] = v
- elif k in ['groups', 'tokens'] and (v == '' or v is None):
- self.user[k] = []
- elif k == 'groups' and isinstance(v, str):
- self.user['groups'] = v.split(',')
- elif k == 'tokens' and isinstance(v, list):
- for token in v:
- if 'privsep' in token:
- token['privsep'] = proxmox_to_ansible_bool(token['privsep'])
- self.user['tokens'] = v
- elif k == 'tokens' and isinstance(v, dict):
- self.user['tokens'] = list()
- for tokenid, tokenvalues in v.items():
- t = tokenvalues
- t['tokenid'] = tokenid
- if 'privsep' in tokenvalues:
- t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep'])
- self.user['tokens'].append(t)
- else:
- self.user[k] = v
-
-
-def proxmox_user_info_argument_spec():
- return dict(
- domain=dict(type='str', aliases=['realm']),
- user=dict(type='str', aliases=['name']),
- userid=dict(type='str'),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- user_info_args = proxmox_user_info_argument_spec()
- module_args.update(user_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[('api_password', 'api_token_id')],
- required_together=[('api_token_id', 'api_token_secret')],
- mutually_exclusive=[('user', 'userid'), ('domain', 'userid')],
- supports_check_mode=True
- )
- result = dict(
- changed=False
- )
-
- proxmox = ProxmoxUserInfoAnsible(module)
- domain = module.params['domain']
- user = module.params['user']
- if user and domain:
- userid = user + '@' + domain
- else:
- userid = module.params['userid']
-
- if userid:
- users = [proxmox.get_user(userid=userid)]
- else:
- users = proxmox.get_users(domain=domain)
- result['proxmox_users'] = [user.user for user in users]
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/plugins/modules/proxmox_vm_info.py b/plugins/modules/proxmox_vm_info.py
deleted file mode 100644
index 34d701c25e..0000000000
--- a/plugins/modules/proxmox_vm_info.py
+++ /dev/null
@@ -1,285 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2023, Sergei Antipov
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = r"""
-module: proxmox_vm_info
-short_description: Retrieve information about one or more Proxmox VE virtual machines
-version_added: 7.2.0
-description:
- - Retrieve information about one or more Proxmox VE virtual machines.
-author: 'Sergei Antipov (@UnderGreen) '
-attributes:
- action_group:
- version_added: 9.0.0
-options:
- node:
- description:
- - Restrict results to a specific Proxmox VE node.
- type: str
- type:
- description:
- - Restrict results to a specific virtual machine(s) type.
- type: str
- choices:
- - all
- - qemu
- - lxc
- default: all
- vmid:
- description:
- - Restrict results to a specific virtual machine by using its ID.
- - If VM with the specified vmid does not exist in a cluster then resulting list will be empty.
- type: int
- name:
- description:
- - Restrict results to a specific virtual machine(s) by using their name.
- - If VM(s) with the specified name do not exist in a cluster then the resulting list will be empty.
- type: str
- config:
- description:
- - Whether to retrieve the VM configuration along with VM status.
- - If set to V(none) (default), no configuration will be returned.
- - If set to V(current), the current running configuration will be returned.
- - If set to V(pending), the configuration with pending changes applied will be returned.
- type: str
- choices:
- - none
- - current
- - pending
- default: none
- version_added: 8.1.0
- network:
- description:
- - Whether to retrieve the current network status.
- - Requires enabled/running qemu-guest-agent on qemu VMs.
- type: bool
- default: false
- version_added: 9.1.0
-extends_documentation_fragment:
- - community.general.proxmox.actiongroup_proxmox
- - community.general.proxmox.documentation
- - community.general.attributes
- - community.general.attributes.info_module
-"""
-
-EXAMPLES = r"""
-- name: List all existing virtual machines on node
- community.general.proxmox_vm_info:
- api_host: proxmoxhost
- api_user: root@pam
- api_token_id: '{{ token_id | default(omit) }}'
- api_token_secret: '{{ token_secret | default(omit) }}'
- node: node01
-
-- name: List all QEMU virtual machines on node
- community.general.proxmox_vm_info:
- api_host: proxmoxhost
- api_user: root@pam
- api_password: '{{ password | default(omit) }}'
- node: node01
- type: qemu
-
-- name: Retrieve information about specific VM by ID
- community.general.proxmox_vm_info:
- api_host: proxmoxhost
- api_user: root@pam
- api_password: '{{ password | default(omit) }}'
- node: node01
- type: qemu
- vmid: 101
-
-- name: Retrieve information about specific VM by name and get current configuration
- community.general.proxmox_vm_info:
- api_host: proxmoxhost
- api_user: root@pam
- api_password: '{{ password | default(omit) }}'
- node: node01
- type: lxc
- name: lxc05.home.arpa
- config: current
-"""
-
-RETURN = r"""
-proxmox_vms:
- description: List of virtual machines.
- returned: on success
- type: list
- elements: dict
- sample:
- [
- {
- "cpu": 0.258944410905281,
- "cpus": 1,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "id": "qemu/100",
- "maxcpu": 1,
- "maxdisk": 34359738368,
- "maxmem": 4294967296,
- "mem": 35158379,
- "name": "pxe.home.arpa",
- "netin": 99715803,
- "netout": 14237835,
- "node": "pve",
- "pid": 1947197,
- "status": "running",
- "template": False,
- "type": "qemu",
- "uptime": 135530,
- "vmid": 100
- },
- {
- "cpu": 0,
- "cpus": 1,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "id": "qemu/101",
- "maxcpu": 1,
- "maxdisk": 0,
- "maxmem": 536870912,
- "mem": 0,
- "name": "test1",
- "netin": 0,
- "netout": 0,
- "node": "pve",
- "status": "stopped",
- "template": False,
- "type": "qemu",
- "uptime": 0,
- "vmid": 101
- }
- ]
-"""
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec,
- ProxmoxAnsible,
- proxmox_to_ansible_bool,
-)
-
-
-class ProxmoxVmInfoAnsible(ProxmoxAnsible):
- def get_vms_from_cluster_resources(self):
- try:
- return self.proxmox_api.cluster().resources().get(type="vm")
- except Exception as e:
- self.module.fail_json(
- msg="Failed to retrieve VMs information from cluster resources: %s" % e
- )
-
- def get_vms_from_nodes(self, cluster_machines, type, vmid=None, name=None, node=None, config=None, network=False):
- # Leave in dict only machines that user wants to know about
- filtered_vms = {
- vm: info for vm, info in cluster_machines.items() if not (
- type != info["type"]
- or (node and info["node"] != node)
- or (vmid and int(info["vmid"]) != vmid)
- or (name is not None and info["name"] != name)
- )
- }
- # Get list of unique node names and loop through it to get info about machines.
- nodes = frozenset([info["node"] for vm, info in filtered_vms.items()])
- for this_node in nodes:
- # "type" is mandatory and can have only values of "qemu" or "lxc". Seems that use of reflection is safe.
- call_vm_getter = getattr(self.proxmox_api.nodes(this_node), type)
- vms_from_this_node = call_vm_getter().get()
- for detected_vm in vms_from_this_node:
- this_vm_id = int(detected_vm["vmid"])
- desired_vm = filtered_vms.get(this_vm_id, None)
- if desired_vm:
- desired_vm.update(detected_vm)
- desired_vm["vmid"] = this_vm_id
- desired_vm["template"] = proxmox_to_ansible_bool(desired_vm.get("template", 0))
- # When user wants to retrieve the VM configuration
- if config != "none":
- # pending = 0, current = 1
- config_type = 0 if config == "pending" else 1
- # GET /nodes/{node}/qemu/{vmid}/config current=[0/1]
- desired_vm["config"] = call_vm_getter(this_vm_id).config().get(current=config_type)
- if network:
- if type == "qemu":
- desired_vm["network"] = call_vm_getter(this_vm_id).agent("network-get-interfaces").get()['result']
- elif type == "lxc":
- desired_vm["network"] = call_vm_getter(this_vm_id).interfaces.get()
-
- return filtered_vms
-
- def get_qemu_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None, network=False):
- try:
- return self.get_vms_from_nodes(cluster_machines, "qemu", vmid, name, node, config, network)
- except Exception as e:
- self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e)
-
- def get_lxc_vms(self, cluster_machines, vmid=None, name=None, node=None, config=None, network=False):
- try:
- return self.get_vms_from_nodes(cluster_machines, "lxc", vmid, name, node, config, network)
- except Exception as e:
- self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e)
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- vm_info_args = dict(
- node=dict(type="str", required=False),
- type=dict(
- type="str", choices=["lxc", "qemu", "all"], default="all", required=False
- ),
- vmid=dict(type="int", required=False),
- name=dict(type="str", required=False),
- config=dict(
- type="str", choices=["none", "current", "pending"],
- default="none", required=False
- ),
- network=dict(type="bool", default=False, required=False),
- )
- module_args.update(vm_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_together=[("api_token_id", "api_token_secret")],
- required_one_of=[("api_password", "api_token_id")],
- supports_check_mode=True,
- )
-
- proxmox = ProxmoxVmInfoAnsible(module)
- node = module.params["node"]
- type = module.params["type"]
- vmid = module.params["vmid"]
- name = module.params["name"]
- config = module.params["config"]
- network = module.params["network"]
-
- result = dict(changed=False)
-
- if node and proxmox.get_node(node) is None:
- module.fail_json(msg="Node %s doesn't exist in PVE cluster" % node)
-
- vms_cluster_resources = proxmox.get_vms_from_cluster_resources()
- cluster_machines = {int(machine["vmid"]): machine for machine in vms_cluster_resources}
- vms = {}
-
- if type == "lxc":
- vms = proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config, network)
- elif type == "qemu":
- vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config, network)
- else:
- vms = proxmox.get_qemu_vms(cluster_machines, vmid, name, node, config, network)
- vms.update(proxmox.get_lxc_vms(cluster_machines, vmid, name, node, config, network))
-
- result["proxmox_vms"] = [info for vm, info in sorted(vms.items())]
- module.exit_json(**result)
-
-
-if __name__ == "__main__":
- main()
diff --git a/plugins/modules/pubnub_blocks.py b/plugins/modules/pubnub_blocks.py
index c8992b2aa7..316fced4be 100644
--- a/plugins/modules/pubnub_blocks.py
+++ b/plugins/modules/pubnub_blocks.py
@@ -56,13 +56,13 @@ options:
default: {}
account:
description:
- - Name of PubNub account for from which O(application) will be used to manage blocks.
- - User's account will be used if value not set or empty.
+ - Name of PubNub account for from which O(application) is used to manage blocks.
+ - User's account is used if value not set or empty.
type: str
default: ''
application:
description:
- - Name of target PubNub application for which blocks configuration on specific O(keyset) will be done.
+ - Name of target PubNub application for which blocks configuration on specific O(keyset) is done.
type: str
required: true
keyset:
@@ -72,19 +72,19 @@ options:
required: true
state:
description:
- - Intended block state after event handlers creation / update process will be completed.
+ - Intended block state after event handlers creation / update process is completed.
required: false
default: 'present'
choices: ['started', 'stopped', 'present', 'absent']
type: str
name:
description:
- - Name of managed block which will be later visible on admin.pubnub.com.
+ - Name of managed block which is later visible on admin.pubnub.com.
required: true
type: str
description:
description:
- - Short block description which will be later visible on U(https://admin.pubnub.com).
+ - Short block description which is later visible on U(https://admin.pubnub.com).
- Used only if block does not exists and does not change description for existing block.
required: false
type: str
@@ -99,8 +99,8 @@ options:
- Each entry for existing handlers should contain C(name) (so target handler can be identified). Rest parameters (C(src),
C(channels) and C(event)) can be added if changes required for them.
- It is possible to rename event handler by adding C(changes) key to event handler payload and pass dictionary, which
- will contain single key C(name), where new name should be passed.
- - To remove particular event handler it is possible to set C(state) for it to C(absent) and it will be removed.
+ contains single key C(name), where new name should be passed.
+ - To remove particular event handler it is possible to set C(state) for it to C(absent) and it is removed.
required: false
default: []
type: list
@@ -115,7 +115,7 @@ options:
validate_certs:
description:
- This key allow to try skip certificates check when performing REST API calls. Sometimes host may have issues with
- certificates on it and this will cause problems to call PubNub REST API.
+ certificates on it and this causes problems to call PubNub REST API.
- If check should be ignored V(false) should be passed to this parameter.
required: false
default: true
@@ -532,9 +532,9 @@ def _content_of_file_at_path(path):
def main():
fields = dict(
- email=dict(default='', required=False, type='str'),
- password=dict(default='', required=False, type='str', no_log=True),
- account=dict(default='', required=False, type='str'),
+ email=dict(default='', type='str'),
+ password=dict(default='', type='str', no_log=True),
+ account=dict(default='', type='str'),
application=dict(required=True, type='str'),
keyset=dict(required=True, type='str', no_log=False),
state=dict(default='present', type='str',
diff --git a/plugins/modules/pulp_repo.py b/plugins/modules/pulp_repo.py
index 0af129d26a..cc7f001837 100644
--- a/plugins/modules/pulp_repo.py
+++ b/plugins/modules/pulp_repo.py
@@ -36,7 +36,7 @@ options:
description:
- C(httplib2), the library used by the M(ansible.builtin.uri) module only sends authentication information when a webservice
responds to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins
- will fail. This option forces the sending of the Basic authentication header upon initial request.
+ fail. This option forces the sending of the Basic authentication header upon initial request.
type: bool
default: false
generate_sqlite:
@@ -131,22 +131,22 @@ options:
default: true
state:
description:
- - The repo state. A state of V(sync) will queue a sync of the repo. This is asynchronous but not delayed like a scheduled
- sync. A state of V(publish) will use the repository's distributor to publish the content.
+ - The repo state. A state of V(sync) queues a sync of the repo. This is asynchronous but not delayed like a scheduled
+ sync. A state of V(publish) uses the repository's distributor to publish the content.
default: present
choices: ["present", "absent", "sync", "publish"]
type: str
url_password:
description:
- The password for use in HTTP basic authentication to the pulp API. If the O(url_username) parameter is not specified,
- the O(url_password) parameter will not be used.
+ the O(url_password) parameter is not used.
url_username:
description:
- The username for use in HTTP basic authentication to the pulp API.
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
wait_for_completion:
diff --git a/plugins/modules/puppet.py b/plugins/modules/puppet.py
index a631a8ec55..a1ab66efc6 100644
--- a/plugins/modules/puppet.py
+++ b/plugins/modules/puppet.py
@@ -66,8 +66,8 @@ options:
logdest:
description:
- Where the puppet logs should go, if puppet apply is being used.
- - V(all) will go to both C(console) and C(syslog).
- - V(stdout) will be deprecated and replaced by C(console).
+ - V(all) goes to both C(console) and C(syslog).
+ - V(stdout) is deprecated and replaced by C(console).
type: str
choices: [all, stdout, syslog]
default: stdout
@@ -128,8 +128,8 @@ options:
- The default value, V(C), is supported on every system, but can lead to encoding errors if UTF-8 is used in the output.
- Use V(C.UTF-8) or V(en_US.UTF-8) or similar UTF-8 supporting locales in case of problems. You need to make sure the
selected locale is supported on the system the puppet agent runs on.
- - Starting with community.general 9.1.0, you can use the value V(auto) and the module will try and determine the best
- parseable locale to use.
+ - Starting with community.general 9.1.0, you can use the value V(auto) and the module tries to determine the best parseable
+ locale to use.
type: str
default: C
version_added: 8.6.0
diff --git a/plugins/modules/pushbullet.py b/plugins/modules/pushbullet.py
index 8e6ac0ed18..990ac36525 100644
--- a/plugins/modules/pushbullet.py
+++ b/plugins/modules/pushbullet.py
@@ -81,7 +81,7 @@ EXAMPLES = r"""
community.general.pushbullet:
api_key: ABC123abc123ABC123abc123ABC123ab
channel: my-awesome-channel
- title: Broadcasting a message to the #my-awesome-channel folks
+ title: "Broadcasting a message to the #my-awesome-channel folks"
- name: Sends a push notification with title and body to a channel
community.general.pushbullet:
@@ -114,12 +114,12 @@ def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(type='str', required=True, no_log=True),
- channel=dict(type='str', default=None),
- device=dict(type='str', default=None),
+ channel=dict(type='str'),
+ device=dict(type='str'),
push_type=dict(type='str', default="note", choices=['note', 'link']),
title=dict(type='str', required=True),
- body=dict(type='str', default=None),
- url=dict(type='str', default=None),
+ body=dict(type='str'),
+ url=dict(type='str'),
),
mutually_exclusive=(
['channel', 'device'],
diff --git a/plugins/modules/pushover.py b/plugins/modules/pushover.py
index ae57411531..dcfce34a06 100644
--- a/plugins/modules/pushover.py
+++ b/plugins/modules/pushover.py
@@ -15,7 +15,7 @@ short_description: Send notifications through U(https://pushover.net)
description:
- Send notifications through pushover to subscriber list of devices and email addresses. Requires pushover app on devices.
notes:
- - You will require a pushover.net account to use this module. But no account is required to receive messages.
+ - You need a pushover.net account to use this module. But no account is required to receive messages.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -140,7 +140,7 @@ def main():
msg=dict(required=True),
app_token=dict(required=True, no_log=True),
user_key=dict(required=True, no_log=True),
- pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']),
+ pri=dict(default='0', choices=['-2', '-1', '0', '1', '2']),
device=dict(type='str'),
),
)
diff --git a/plugins/modules/python_requirements_info.py b/plugins/modules/python_requirements_info.py
index a8ef5a952f..cbe93dd944 100644
--- a/plugins/modules/python_requirements_info.py
+++ b/plugins/modules/python_requirements_info.py
@@ -92,7 +92,7 @@ python_system_path:
- /usr/lib/python/site-packages/
valid:
description: A dictionary of dependencies that matched their desired versions. If no version was specified, then RV(ignore:desired)
- will be V(null).
+ is V(null).
returned: always
type: dict
sample:
diff --git a/plugins/modules/redfish_command.py b/plugins/modules/redfish_command.py
index 25f92bd1f7..f8578488e9 100644
--- a/plugins/modules/redfish_command.py
+++ b/plugins/modules/redfish_command.py
@@ -229,8 +229,8 @@ options:
description:
- Custom OEM properties for HTTP Multipart Push updates.
- If set, then O(update_custom_oem_header) is required too.
- - The properties will be passed raw without any validation or conversion by Ansible. This means the content can be a
- file, a string, or any other data. If the content is a dict that should be converted to JSON, then the content must
+ - The properties are passed raw without any validation or conversion by Ansible. This means the content can be a file,
+ a string, or any other data. If the content is a dictionary that should be converted to JSON, then the content must
be converted to JSON before passing it to this module using the P(ansible.builtin.to_json#filter) filter.
type: raw
version_added: '10.1.0'
@@ -830,15 +830,16 @@ return_values:
returned: on success
type: dict
version_added: 6.1.0
- sample: {
- "update_status": {
- "handle": "/redfish/v1/TaskService/TaskMonitors/735",
- "messages": [],
- "resets_requested": [],
- "ret": true,
- "status": "New"
+ sample:
+ {
+ "update_status": {
+ "handle": "/redfish/v1/TaskService/TaskMonitors/735",
+ "messages": [],
+ "resets_requested": [],
+ "ret": true,
+ "status": "New"
+ }
}
- }
"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/redfish_config.py b/plugins/modules/redfish_config.py
index 8700d4b8de..6eba0b0048 100644
--- a/plugins/modules/redfish_config.py
+++ b/plugins/modules/redfish_config.py
@@ -166,9 +166,9 @@ options:
volume_details:
required: false
description:
- - Setting dict of volume to be created.
- - If C(CapacityBytes) key is not specified in this dictionary, the size of the volume will be determined by the Redfish
- service. It is possible the size will not be the maximum available size.
+ - Setting dictionary of volume to be created.
+ - If C(CapacityBytes) key is not specified in this dictionary, the size of the volume is determined by the Redfish service.
+ It is possible the size is not the maximum available size.
type: dict
default: {}
version_added: '7.5.0'
diff --git a/plugins/modules/redhat_subscription.py b/plugins/modules/redhat_subscription.py
index 6818253c9d..165bfb2891 100644
--- a/plugins/modules/redhat_subscription.py
+++ b/plugins/modules/redhat_subscription.py
@@ -21,7 +21,7 @@ notes:
- 'The module tries to use the D-Bus C(rhsm) service (part of C(subscription-manager)) to register, starting from community.general
6.5.0: this is done so credentials (username, password, activation keys) can be passed to C(rhsm) in a secure way. C(subscription-manager)
itself gets credentials only as arguments of command line parameters, which is I(not) secure, as they can be easily stolen
- by checking the process listing on the system. Due to limitations of the D-Bus interface of C(rhsm), the module will I(not)
+ by checking the process listing on the system. Due to limitations of the D-Bus interface of C(rhsm), the module does I(not)
use D-Bus for registration when trying either to register using O(token), or when specifying O(environment), or when the
system is old (typically RHEL 7 older than 7.4, RHEL 6, and older).'
- In order to register a system, subscription-manager requires either a username and password, or an activationkey and an
@@ -132,10 +132,9 @@ options:
pool_ids:
description:
- Specify subscription pool IDs to consume.
- - 'A pool ID may be specified as a C(string) - just the pool ID (for example
- V(0123456789abcdef0123456789abcdef)), or as a C(dict) with the pool ID as the key, and a quantity as the value (for
- example V(0123456789abcdef0123456789abcdef: 2). If the quantity is provided, it is used to consume multiple entitlements
- from a pool (the pool must support this).'
+ - 'A pool ID may be specified as a C(string) - just the pool ID (for example V(0123456789abcdef0123456789abcdef)), or
+ as a C(dict) with the pool ID as the key, and a quantity as the value (for example V(0123456789abcdef0123456789abcdef:
+ 2). If the quantity is provided, it is used to consume multiple entitlements from a pool (the pool must support this).'
default: []
type: list
elements: raw
@@ -167,8 +166,8 @@ options:
- Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json) and synchronize these attributes with RHSM
server. Syspurpose attributes help attach the most appropriate subscriptions to the system automatically. When C(syspurpose.json)
file already contains some attributes, then new attributes overwrite existing attributes. When some attribute is not
- listed in the new list of attributes, the existing attribute will be removed from C(syspurpose.json) file. Unknown
- attributes are ignored.
+ listed in the new list of attributes, the existing attribute is removed from C(syspurpose.json) file. Unknown attributes
+ are ignored.
type: dict
suboptions:
usage:
@@ -187,7 +186,7 @@ options:
sync:
description:
- When this option is V(true), then syspurpose attributes are synchronized with RHSM server immediately. When this
- option is V(false), then syspurpose attributes will be synchronized with RHSM server by rhsmcertd daemon.
+ option is V(false), then syspurpose attributes are synchronized with RHSM server by rhsmcertd daemon.
type: bool
default: false
"""
diff --git a/plugins/modules/redis_data.py b/plugins/modules/redis_data.py
index 03ae78dce3..4efe409b72 100644
--- a/plugins/modules/redis_data.py
+++ b/plugins/modules/redis_data.py
@@ -33,7 +33,7 @@ options:
type: str
expiration:
description:
- - Expiration time in milliseconds. Setting this flag will always result in a change in the database.
+ - Expiration time in milliseconds. Setting this option always results in a change in the database.
required: false
type: int
non_existing:
@@ -143,11 +143,11 @@ def main():
redis_auth_args = redis_auth_argument_spec()
module_args = dict(
key=dict(type='str', required=True, no_log=False),
- value=dict(type='str', required=False),
- expiration=dict(type='int', required=False),
- non_existing=dict(type='bool', required=False),
- existing=dict(type='bool', required=False),
- keep_ttl=dict(type='bool', required=False),
+ value=dict(type='str'),
+ expiration=dict(type='int'),
+ non_existing=dict(type='bool'),
+ existing=dict(type='bool'),
+ keep_ttl=dict(type='bool'),
state=dict(type='str', default='present',
choices=['present', 'absent']),
)
diff --git a/plugins/modules/redis_data_incr.py b/plugins/modules/redis_data_incr.py
index 7630d621dc..d5f2fe05c2 100644
--- a/plugins/modules/redis_data_incr.py
+++ b/plugins/modules/redis_data_incr.py
@@ -21,8 +21,8 @@ attributes:
support: partial
details:
- For C(check_mode) to work, the specified O(login_user) needs permission to run the C(GET) command on the key, otherwise
- the module will fail.
- - When using C(check_mode) the module will try to calculate the value that Redis would return. If the key is not present,
+ the module fails.
+ - When using C(check_mode) the module tries to calculate the value that Redis would return. If the key is not present,
V(0.0) is used as value.
diff_mode:
support: none
@@ -98,8 +98,8 @@ def main():
redis_auth_args = redis_auth_argument_spec()
module_args = dict(
key=dict(type='str', required=True, no_log=False),
- increment_int=dict(type='int', required=False),
- increment_float=dict(type='float', required=False),
+ increment_int=dict(type='int'),
+ increment_float=dict(type='float'),
)
module_args.update(redis_auth_args)
diff --git a/plugins/modules/redis_info.py b/plugins/modules/redis_info.py
index bc43f9251e..1b38bc1bca 100644
--- a/plugins/modules/redis_info.py
+++ b/plugins/modules/redis_info.py
@@ -63,7 +63,8 @@ info:
description: The default set of server information sections U(https://redis.io/commands/info).
returned: success
type: dict
- sample: {
+ sample:
+ {
"active_defrag_hits": 0,
"active_defrag_key_hits": 0,
"active_defrag_key_misses": 0,
@@ -196,20 +197,21 @@ cluster:
returned: success if O(cluster=true)
version_added: 9.1.0
type: dict
- sample: {
- "cluster_state": ok,
- "cluster_slots_assigned": 16384,
- "cluster_slots_ok": 16384,
- "cluster_slots_pfail": 0,
- "cluster_slots_fail": 0,
- "cluster_known_nodes": 6,
- "cluster_size": 3,
- "cluster_current_epoch": 6,
- "cluster_my_epoch": 2,
- "cluster_stats_messages_sent": 1483972,
- "cluster_stats_messages_received": 1483968,
- "total_cluster_links_buffer_limit_exceeded": 0
- }
+ sample:
+ {
+ "cluster_state": "ok",
+ "cluster_slots_assigned": 16384,
+ "cluster_slots_ok": 16384,
+ "cluster_slots_pfail": 0,
+ "cluster_slots_fail": 0,
+ "cluster_known_nodes": 6,
+ "cluster_size": 3,
+ "cluster_current_epoch": 6,
+ "cluster_my_epoch": 2,
+ "cluster_stats_messages_sent": 1483972,
+ "cluster_stats_messages_received": 1483968,
+ "total_cluster_links_buffer_limit_exceeded": 0
+ }
"""
import traceback
diff --git a/plugins/modules/rhevm.py b/plugins/modules/rhevm.py
index 4d0a810108..7536b7843a 100644
--- a/plugins/modules/rhevm.py
+++ b/plugins/modules/rhevm.py
@@ -153,67 +153,68 @@ options:
RETURN = r"""
vm:
- description: Returns all of the VMs variables and execution.
- returned: always
- type: dict
- sample: {
- "boot_order": [
- "hd",
- "network"
- ],
- "changed": true,
- "changes": [
- "Delete Protection"
- ],
- "cluster": "C1",
- "cpu_share": "0",
- "created": false,
- "datacenter": "Default",
- "del_prot": true,
- "disks": [
- {
- "domain": "ssd-san",
- "name": "OS",
- "size": 40
- }
- ],
- "eth0": "00:00:5E:00:53:00",
- "eth1": "00:00:5E:00:53:01",
- "eth2": "00:00:5E:00:53:02",
- "exists": true,
- "failed": false,
- "ifaces": [
- {
- "name": "eth0",
- "vlan": "Management"
- },
- {
- "name": "eth1",
- "vlan": "Internal"
- },
- {
- "name": "eth2",
- "vlan": "External"
- }
- ],
- "image": false,
- "mempol": "0",
- "msg": [
- "VM exists",
- "cpu_share was already set to 0",
- "VM high availability was already set to True",
- "The boot order has already been set",
- "VM delete protection has been set to True",
- "Disk web2_Disk0_OS already exists",
- "The VM starting host was already set to host416"
- ],
- "name": "web2",
- "type": "server",
- "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
- "vm_ha": true,
- "vmcpu": "4",
- "vmhost": "host416",
- "vmmem": "16"
+ description: Returns all of the VMs variables and execution.
+ returned: always
+ type: dict
+ sample:
+ {
+ "boot_order": [
+ "hd",
+ "network"
+ ],
+ "changed": true,
+ "changes": [
+ "Delete Protection"
+ ],
+ "cluster": "C1",
+ "cpu_share": "0",
+ "created": false,
+ "datacenter": "Default",
+ "del_prot": true,
+ "disks": [
+ {
+ "domain": "ssd-san",
+ "name": "OS",
+ "size": 40
+ }
+ ],
+ "eth0": "00:00:5E:00:53:00",
+ "eth1": "00:00:5E:00:53:01",
+ "eth2": "00:00:5E:00:53:02",
+ "exists": true,
+ "failed": false,
+ "ifaces": [
+ {
+ "name": "eth0",
+ "vlan": "Management"
+ },
+ {
+ "name": "eth1",
+ "vlan": "Internal"
+ },
+ {
+ "name": "eth2",
+ "vlan": "External"
+ }
+ ],
+ "image": false,
+ "mempol": "0",
+ "msg": [
+ "VM exists",
+ "cpu_share was already set to 0",
+ "VM high availability was already set to True",
+ "The boot order has already been set",
+ "VM delete protection has been set to True",
+ "Disk web2_Disk0_OS already exists",
+ "The VM starting host was already set to host416"
+ ],
+ "name": "web2",
+ "type": "server",
+ "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
+ "vm_ha": true,
+ "vmcpu": "4",
+ "vmhost": "host416",
+ "vmmem": "16"
}
"""
@@ -811,7 +812,7 @@ class RHEVConn(object):
setChanged()
HOST = self.get_Host(host_name)
state = HOST.status.state
- while (state != 'non_operational' and state != 'up'):
+ while state != 'non_operational' and state != 'up':
HOST = self.get_Host(host_name)
state = HOST.status.state
time.sleep(1)
diff --git a/plugins/modules/rhsm_release.py b/plugins/modules/rhsm_release.py
index 6408d3c171..665c734849 100644
--- a/plugins/modules/rhsm_release.py
+++ b/plugins/modules/rhsm_release.py
@@ -14,8 +14,8 @@ short_description: Set or Unset RHSM Release version
description:
- Sets or unsets the release version used by RHSM repositories.
notes:
- - This module will fail on an unregistered system. Use the M(community.general.redhat_subscription) module to register a
- system prior to setting the RHSM release.
+ - This module fails on an unregistered system. Use the M(community.general.redhat_subscription) module to register a system
+ prior to setting the RHSM release.
- It is possible to interact with C(subscription-manager) only as root, so root permissions are required to successfully
run this module.
requirements:
diff --git a/plugins/modules/rhsm_repository.py b/plugins/modules/rhsm_repository.py
index c80caa0d6c..e59fcd27b4 100644
--- a/plugins/modules/rhsm_repository.py
+++ b/plugins/modules/rhsm_repository.py
@@ -48,7 +48,7 @@ options:
purge:
description:
- Disable all currently enabled repositories that are not not specified in O(name). Only set this to V(true) if passing
- in a list of repositories to the O(name) field. Using this with C(loop) will most likely not have the desired result.
+ in a list of repositories to the O(name) field. Using this with C(loop) is likely not to have the desired result.
type: bool
default: false
"""
diff --git a/plugins/modules/riak.py b/plugins/modules/riak.py
index d7b45af5cd..2009ca0a22 100644
--- a/plugins/modules/riak.py
+++ b/plugins/modules/riak.py
@@ -62,8 +62,8 @@ options:
type: str
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
"""
@@ -103,15 +103,13 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- command=dict(required=False, default=None, choices=[
- 'ping', 'kv_test', 'join', 'plan', 'commit']),
+ command=dict(choices=['ping', 'kv_test', 'join', 'plan', 'commit']),
config_dir=dict(default='/etc/riak', type='path'),
- http_conn=dict(required=False, default='127.0.0.1:8098'),
- target_node=dict(default='riak@127.0.0.1', required=False),
+ http_conn=dict(default='127.0.0.1:8098'),
+ target_node=dict(default='riak@127.0.0.1'),
wait_for_handoffs=dict(default=0, type='int'),
wait_for_ring=dict(default=0, type='int'),
- wait_for_service=dict(
- required=False, default=None, choices=['kv']),
+ wait_for_service=dict(choices=['kv']),
validate_certs=dict(default=True, type='bool'))
)
diff --git a/plugins/modules/rocketchat.py b/plugins/modules/rocketchat.py
index c09d75ace2..6da9b36e8d 100644
--- a/plugins/modules/rocketchat.py
+++ b/plugins/modules/rocketchat.py
@@ -69,7 +69,7 @@ options:
description:
- Emoji for the message sender. The representation for the available emojis can be got from Rocket Chat.
- For example V(:thumbsup:).
- - If O(icon_emoji) is set, O(icon_url) will not be used.
+ - If O(icon_emoji) is set, O(icon_url) is not used.
link_names:
type: int
description:
@@ -80,8 +80,8 @@ options:
- 0
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
color:
@@ -102,10 +102,10 @@ options:
- Define a list of attachments.
is_pre740:
description:
- - If V(true), the payload matches Rocket.Chat prior to 7.4.0 format.
- This format has been used by the module since its inception, but is no longer supported by Rocket.Chat 7.4.0.
- - The default value of the option will change to V(false) eventually.
- - This parameter will be removed in a future release when Rocket.Chat 7.4.0 becomes the minimum supported version.
+ - If V(true), the payload matches Rocket.Chat prior to 7.4.0 format. This format has been used by the module since its
+ inception, but is no longer supported by Rocket.Chat 7.4.0.
+ - The default value of the option is going to change to V(false) eventually.
+ - This parameter is going to be removed in a future release when Rocket.Chat 7.4.0 becomes the minimum supported version.
type: bool
default: true
version_added: 10.5.0
@@ -124,7 +124,7 @@ EXAMPLES = r"""
domain: chat.example.com
token: thetoken/generatedby/rocketchat
msg: '{{ inventory_hostname }} completed'
- channel: #ansible
+ channel: "#ansible"
username: 'Ansible on {{ inventory_hostname }}'
icon_url: http://www.example.com/some-image-file.png
link_names: 0
@@ -147,7 +147,7 @@ EXAMPLES = r"""
domain: chat.example.com
attachments:
- text: Display my system load on host A and B
- color: #ff00dd
+ color: "#ff00dd"
title: System load
fields:
- title: System A
@@ -159,13 +159,6 @@ EXAMPLES = r"""
delegate_to: localhost
"""
-RETURN = r"""
-changed:
- description: A flag indicating if any change was made or not.
- returned: success
- type: bool
- sample: false
-"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
@@ -181,7 +174,7 @@ def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon
elif text is not None:
payload = dict(attachments=[dict(text=text, color=color)])
if channel is not None:
- if (channel[0] == '#') or (channel[0] == '@'):
+ if channel[0] == '#' or channel[0] == '@':
payload['channel'] = channel
else:
payload['channel'] = '#' + channel
@@ -228,7 +221,7 @@ def main():
domain=dict(type='str', required=True),
token=dict(type='str', required=True, no_log=True),
protocol=dict(type='str', default='https', choices=['http', 'https']),
- msg=dict(type='str', required=False),
+ msg=dict(type='str'),
channel=dict(type='str'),
username=dict(type='str', default='Ansible'),
icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'),
@@ -236,7 +229,7 @@ def main():
link_names=dict(type='int', default=1, choices=[0, 1]),
validate_certs=dict(default=True, type='bool'),
color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
- attachments=dict(type='list', elements='dict', required=False),
+ attachments=dict(type='list', elements='dict'),
is_pre740=dict(default=True, type='bool')
)
)
diff --git a/plugins/modules/rollbar_deployment.py b/plugins/modules/rollbar_deployment.py
index a9658dbadf..e0cf4e31aa 100644
--- a/plugins/modules/rollbar_deployment.py
+++ b/plugins/modules/rollbar_deployment.py
@@ -61,7 +61,7 @@ options:
default: 'https://api.rollbar.com/api/1/deploy/'
validate_certs:
description:
- - If V(false), SSL certificates for the target URL will not be validated. This should only be used on personally controlled
+ - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled
sites using self-signed certificates.
required: false
default: true
@@ -100,13 +100,10 @@ def main():
token=dict(required=True, no_log=True),
environment=dict(required=True),
revision=dict(required=True),
- user=dict(required=False),
- rollbar_user=dict(required=False),
- comment=dict(required=False),
- url=dict(
- required=False,
- default='https://api.rollbar.com/api/1/deploy/'
- ),
+ user=dict(),
+ rollbar_user=dict(),
+ comment=dict(),
+ url=dict(default='https://api.rollbar.com/api/1/deploy/'),
validate_certs=dict(default=True, type='bool'),
),
supports_check_mode=True
diff --git a/plugins/modules/rpm_ostree_pkg.py b/plugins/modules/rpm_ostree_pkg.py
index 652801ca67..01462b25f1 100644
--- a/plugins/modules/rpm_ostree_pkg.py
+++ b/plugins/modules/rpm_ostree_pkg.py
@@ -82,16 +82,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-rc:
- description: Return code of rpm-ostree command.
- returned: always
- type: int
- sample: 0
-changed:
- description: State changes.
- returned: always
- type: bool
- sample: true
action:
description: Action performed.
returned: always
@@ -101,17 +91,7 @@ packages:
description: A list of packages specified.
returned: always
type: list
- sample: ['nfs-utils']
-stdout:
- description: Stdout of rpm-ostree command.
- returned: always
- type: str
- sample: 'Staging deployment...done\n...'
-stderr:
- description: Stderr of rpm-ostree command.
- returned: always
- type: str
- sample: ''
+ sample: ["nfs-utils"]
cmd:
description: Full command used for performed action.
returned: always
diff --git a/plugins/modules/rundeck_acl_policy.py b/plugins/modules/rundeck_acl_policy.py
index aa22e6e6ea..0c089792b8 100644
--- a/plugins/modules/rundeck_acl_policy.py
+++ b/plugins/modules/rundeck_acl_policy.py
@@ -129,11 +129,18 @@ from ansible_collections.community.general.plugins.module_utils.rundeck import (
class RundeckACLManager:
def __init__(self, module):
self.module = module
+ if module.params.get("project"):
+ self.endpoint = "project/%s/acl/%s.aclpolicy" % (
+ self.module.params["project"],
+ self.module.params["name"],
+ )
+ else:
+ self.endpoint = "system/acl/%s.aclpolicy" % self.module.params["name"]
def get_acl(self):
resp, info = api_request(
module=self.module,
- endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+ endpoint=self.endpoint,
)
return resp
@@ -147,7 +154,7 @@ class RundeckACLManager:
resp, info = api_request(
module=self.module,
- endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+ endpoint=self.endpoint,
method="POST",
data={"contents": self.module.params["policy"]},
)
@@ -171,7 +178,7 @@ class RundeckACLManager:
resp, info = api_request(
module=self.module,
- endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+ endpoint=self.endpoint,
method="PUT",
data={"contents": self.module.params["policy"]},
)
@@ -194,7 +201,7 @@ class RundeckACLManager:
if not self.module.check_mode:
api_request(
module=self.module,
- endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+ endpoint=self.endpoint,
method="DELETE",
)
diff --git a/plugins/modules/rundeck_job_executions_info.py b/plugins/modules/rundeck_job_executions_info.py
index 540c8c7788..77fb94c79d 100644
--- a/plugins/modules/rundeck_job_executions_info.py
+++ b/plugins/modules/rundeck_job_executions_info.py
@@ -80,46 +80,53 @@ paging:
description: Maximum number of results per page.
type: int
returned: success
- sample: {"count": 20, "total": 100, "offset": 0, "max": 20}
+ sample:
+ {
+ "count": 20,
+ "total": 100,
+ "offset": 0,
+ "max": 20
+ }
executions:
- description: Job executions list.
- returned: always
- type: list
- elements: dict
- sample: [
- {
- "id": 1,
- "href": "https://rundeck.example.org/api/39/execution/1",
- "permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
- "status": "succeeded",
- "project": "myproject",
- "executionType": "user",
- "user": "admin",
- "date-started": {
- "unixtime": 1633525515026,
- "date": "2021-10-06T13:05:15Z"
- },
- "date-ended": {
- "unixtime": 1633525518386,
- "date": "2021-10-06T13:05:18Z"
- },
- "job": {
- "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
- "averageDuration": 6381,
- "name": "Test",
- "group": "",
- "project": "myproject",
- "description": "",
- "options": {
- "exit_code": "0"
- },
- "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
- "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
- },
- "description": "Plugin[com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowStep, nodeStep: false]",
- "argstring": "-exit_code 0",
- "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068"
- }
+ description: Job executions list.
+ returned: always
+ type: list
+ elements: dict
+ sample:
+ [
+ {
+ "id": 1,
+ "href": "https://rundeck.example.org/api/39/execution/1",
+ "permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
+ "status": "succeeded",
+ "project": "myproject",
+ "executionType": "user",
+ "user": "admin",
+ "date-started": {
+ "unixtime": 1633525515026,
+ "date": "2021-10-06T13:05:15Z"
+ },
+ "date-ended": {
+ "unixtime": 1633525518386,
+ "date": "2021-10-06T13:05:18Z"
+ },
+ "job": {
+ "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
+ "averageDuration": 6381,
+ "name": "Test",
+ "group": "",
+ "project": "myproject",
+ "description": "",
+ "options": {
+ "exit_code": "0"
+ },
+ "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
+ "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
+ },
+ "description": "Plugin[com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowStep, nodeStep: false]",
+ "argstring": "-exit_code 0",
+ "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068"
+ }
]
"""
diff --git a/plugins/modules/rundeck_job_run.py b/plugins/modules/rundeck_job_run.py
index f46b5ee432..1c5841b3c5 100644
--- a/plugins/modules/rundeck_job_run.py
+++ b/plugins/modules/rundeck_job_run.py
@@ -62,7 +62,7 @@ options:
type: int
description:
- Job execution wait timeout in seconds.
- - If the timeout is reached, the job will be aborted.
+ - If the timeout is reached, the job is aborted.
- Keep in mind that there is a sleep based on O(wait_execution_delay) after each job status check.
default: 120
abort_on_timeout:
@@ -133,48 +133,49 @@ EXAMPLES = r"""
RETURN = r"""
execution_info:
- description: Rundeck job execution metadata.
- returned: always
- type: dict
- sample: {
- "msg": "Job execution succeeded!",
- "execution_info": {
- "id": 1,
- "href": "https://rundeck.example.org/api/39/execution/1",
- "permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
- "status": "succeeded",
- "project": "myproject",
- "executionType": "user",
- "user": "admin",
- "date-started": {
- "unixtime": 1633449020784,
- "date": "2021-10-05T15:50:20Z"
- },
- "date-ended": {
- "unixtime": 1633449026358,
- "date": "2021-10-05T15:50:26Z"
- },
- "job": {
- "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
- "averageDuration": 4917,
- "name": "Test",
- "group": "",
- "project": "myproject",
- "description": "",
- "options": {
- "exit_code": "0"
- },
- "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
- "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
- },
- "description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}",
- "argstring": "-exit_code 0",
- "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068",
- "successfulNodes": [
- "localhost"
- ],
- "output": "Test!"
- }
+ description: Rundeck job execution metadata.
+ returned: always
+ type: dict
+ sample:
+ {
+ "msg": "Job execution succeeded!",
+ "execution_info": {
+ "id": 1,
+ "href": "https://rundeck.example.org/api/39/execution/1",
+ "permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
+ "status": "succeeded",
+ "project": "myproject",
+ "executionType": "user",
+ "user": "admin",
+ "date-started": {
+ "unixtime": 1633449020784,
+ "date": "2021-10-05T15:50:20Z"
+ },
+ "date-ended": {
+ "unixtime": 1633449026358,
+ "date": "2021-10-05T15:50:26Z"
+ },
+ "job": {
+ "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
+ "averageDuration": 4917,
+ "name": "Test",
+ "group": "",
+ "project": "myproject",
+ "description": "",
+ "options": {
+ "exit_code": "0"
+ },
+ "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
+ "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
+ },
+ "description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}",
+ "argstring": "-exit_code 0",
+ "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068",
+ "successfulNodes": [
+ "localhost"
+ ],
+ "output": "Test!"
+ }
}
"""
diff --git a/plugins/modules/runit.py b/plugins/modules/runit.py
index f26f241537..5a575fa2ba 100644
--- a/plugins/modules/runit.py
+++ b/plugins/modules/runit.py
@@ -30,9 +30,10 @@ options:
required: true
state:
description:
- - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary. V(restarted) will always
- bounce the service (sv restart) and V(killed) will always bounce the service (sv force-stop). V(reloaded) will send
- a HUP (sv reload). V(once) will run a normally downed sv once (sv once), not really an idempotent operation.
+ - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+ - V(restarted) always bounces the service (sv restart) and V(killed) always bounces the service (sv force-stop).
+ - V(reloaded) always sends a HUP (sv reload).
+ - V(once) runs a normally downed sv once (sv once), not really an idempotent operation.
type: str
choices: [killed, once, reloaded, restarted, started, stopped]
enabled:
diff --git a/plugins/modules/say.py b/plugins/modules/say.py
index 2dc359083d..eff582f125 100644
--- a/plugins/modules/say.py
+++ b/plugins/modules/say.py
@@ -17,7 +17,8 @@ description:
notes:
- In 2.5, this module has been renamed from C(osx_say) to M(community.general.say).
- If you like this module, you may also be interested in the osx_say callback plugin.
- - A list of available voices, with language, can be found by running C(say -v ?) on a OSX host and C(espeak --voices) on a Linux host.
+ - A list of available voices, with language, can be found by running C(say -v ?) on a OSX host and C(espeak --voices) on
+ a Linux host.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -66,7 +67,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
msg=dict(required=True),
- voice=dict(required=False),
+ voice=dict(),
),
supports_check_mode=True
)
diff --git a/plugins/modules/scaleway_compute.py b/plugins/modules/scaleway_compute.py
index c61030bede..f3653cd3b6 100644
--- a/plugins/modules/scaleway_compute.py
+++ b/plugins/modules/scaleway_compute.py
@@ -133,7 +133,7 @@ options:
type: str
description:
- Security group unique identifier.
- - If no value provided, the default security group or current security group will be used.
+ - If no value provided, the default security group or current security group is used.
required: false
"""
diff --git a/plugins/modules/scaleway_compute_private_network.py b/plugins/modules/scaleway_compute_private_network.py
index 5339dfef15..a3b6c031f7 100644
--- a/plugins/modules/scaleway_compute_private_network.py
+++ b/plugins/modules/scaleway_compute_private_network.py
@@ -93,26 +93,26 @@ EXAMPLES = r"""
RETURN = r"""
scaleway_compute_private_network:
- description: Information on the VPC.
- returned: success when O(state=present)
- type: dict
- sample:
- {
- "created_at": "2022-01-15T11:11:12.676445Z",
- "id": "12345678-f1e6-40ec-83e5-12345d67ed89",
- "name": "network",
- "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
- "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
- "tags": [
- "tag1",
- "tag2",
- "tag3",
- "tag4",
- "tag5"
- ],
- "updated_at": "2022-01-15T11:12:04.624837Z",
- "zone": "fr-par-2"
- }
+ description: Information on the VPC.
+ returned: success when O(state=present)
+ type: dict
+ sample:
+ {
+ "created_at": "2022-01-15T11:11:12.676445Z",
+ "id": "12345678-f1e6-40ec-83e5-12345d67ed89",
+ "name": "network",
+ "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "tags": [
+ "tag1",
+ "tag2",
+ "tag3",
+ "tag4",
+ "tag5"
+ ],
+ "updated_at": "2022-01-15T11:12:04.624837Z",
+ "zone": "fr-par-2"
+ }
"""
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
diff --git a/plugins/modules/scaleway_container.py b/plugins/modules/scaleway_container.py
index dc4df1c1d5..8351660fd6 100644
--- a/plugins/modules/scaleway_container.py
+++ b/plugins/modules/scaleway_container.py
@@ -89,7 +89,7 @@ options:
secret_environment_variables:
description:
- Secret environment variables of the container namespace.
- - Updating those values will not output a C(changed) state in Ansible.
+ - Updating those values does not output a C(changed) state in Ansible.
- Injected in container at runtime.
type: dict
default: {}
@@ -125,7 +125,7 @@ options:
max_concurrency:
description:
- Maximum number of connections per container.
- - This parameter will be used to trigger autoscaling.
+ - This parameter is used to trigger autoscaling.
type: int
protocol:
diff --git a/plugins/modules/scaleway_container_namespace.py b/plugins/modules/scaleway_container_namespace.py
index 802a491321..781c9ffc25 100644
--- a/plugins/modules/scaleway_container_namespace.py
+++ b/plugins/modules/scaleway_container_namespace.py
@@ -79,7 +79,7 @@ options:
secret_environment_variables:
description:
- Secret environment variables of the container namespace.
- - Updating those values will not output a C(changed) state in Ansible.
+ - Updating those values does not output a C(changed) state in Ansible.
- Injected in containers at runtime.
type: dict
default: {}
diff --git a/plugins/modules/scaleway_container_registry.py b/plugins/modules/scaleway_container_registry.py
index 132dfe8bb6..4e352c5b9e 100644
--- a/plugins/modules/scaleway_container_registry.py
+++ b/plugins/modules/scaleway_container_registry.py
@@ -71,7 +71,7 @@ options:
type: str
description:
- Default visibility policy.
- - Everyone will be able to pull images from a V(public) registry.
+ - Everyone can pull images from a V(public) registry.
choices:
- public
- private
diff --git a/plugins/modules/scaleway_database_backup.py b/plugins/modules/scaleway_database_backup.py
index b19a6b49bd..48add5dfc6 100644
--- a/plugins/modules/scaleway_database_backup.py
+++ b/plugins/modules/scaleway_database_backup.py
@@ -143,25 +143,26 @@ EXAMPLES = r"""
RETURN = r"""
metadata:
- description: Backup metadata.
- returned: when O(state=present), O(state=exported), or O(state=restored)
- type: dict
- sample: {
- "metadata": {
- "created_at": "2020-08-06T12:42:05.631049Z",
- "database_name": "my-database",
- "download_url": null,
- "download_url_expires_at": null,
- "expires_at": null,
- "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07",
- "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49",
- "instance_name": "my-instance",
- "name": "backup_name",
- "region": "fr-par",
- "size": 600000,
- "status": "ready",
- "updated_at": "2020-08-06T12:42:10.581649Z"
- }
+ description: Backup metadata.
+ returned: when O(state=present), O(state=exported), or O(state=restored)
+ type: dict
+ sample:
+ {
+ "metadata": {
+ "created_at": "2020-08-06T12:42:05.631049Z",
+ "database_name": "my-database",
+ "download_url": null,
+ "download_url_expires_at": null,
+ "expires_at": null,
+ "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07",
+ "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49",
+ "instance_name": "my-instance",
+ "name": "backup_name",
+ "region": "fr-par",
+ "size": 600000,
+ "status": "ready",
+ "updated_at": "2020-08-06T12:42:10.581649Z"
+ }
}
"""
@@ -353,8 +354,8 @@ def main():
region=dict(required=True, choices=SCALEWAY_REGIONS),
id=dict(),
name=dict(type='str'),
- database_name=dict(required=False),
- instance_id=dict(required=False),
+ database_name=dict(),
+ instance_id=dict(),
expires_at=dict(),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
diff --git a/plugins/modules/scaleway_function.py b/plugins/modules/scaleway_function.py
index e2142dd1f2..4bc7c42688 100644
--- a/plugins/modules/scaleway_function.py
+++ b/plugins/modules/scaleway_function.py
@@ -89,7 +89,7 @@ options:
secret_environment_variables:
description:
- Secret environment variables of the function.
- - Updating those values will not output a C(changed) state in Ansible.
+ - Updating those values does not output a C(changed) state in Ansible.
- Injected in function at runtime.
type: dict
default: {}
diff --git a/plugins/modules/scaleway_function_namespace.py b/plugins/modules/scaleway_function_namespace.py
index d43b42bc7f..e5e00bf681 100644
--- a/plugins/modules/scaleway_function_namespace.py
+++ b/plugins/modules/scaleway_function_namespace.py
@@ -79,7 +79,7 @@ options:
secret_environment_variables:
description:
- Secret environment variables of the function namespace.
- - Updating those values will not output a C(changed) state in Ansible.
+ - Updating those values does not output a C(changed) state in Ansible.
- Injected in functions at runtime.
type: dict
default: {}
diff --git a/plugins/modules/scaleway_image_info.py b/plugins/modules/scaleway_image_info.py
index 0f6d1539c8..0b2fe0476d 100644
--- a/plugins/modules/scaleway_image_info.py
+++ b/plugins/modules/scaleway_image_info.py
@@ -57,37 +57,37 @@ scaleway_image_info:
type: list
elements: dict
sample:
- "scaleway_image_info": [
- {
- "arch": "x86_64",
- "creation_date": "2018-07-17T16:18:49.276456+00:00",
- "default_bootscript": {
- "architecture": "x86_64",
- "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
- "default": false,
- "dtb": "",
- "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
- "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
- "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
- "organization": "11111111-1111-4111-8111-111111111111",
- "public": true,
- "title": "x86_64 mainline 4.9.93 rev1"
- },
- "extra_volumes": [],
- "from_server": null,
- "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
- "modification_date": "2018-07-17T16:42:06.319315+00:00",
- "name": "Debian Stretch",
- "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
- "public": true,
- "root_volume": {
- "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
- "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
- "size": 25000000000,
- "volume_type": "l_ssd"
- },
- "state": "available"
- }
+ [
+ {
+ "arch": "x86_64",
+ "creation_date": "2018-07-17T16:18:49.276456+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": false,
+ "dtb": "",
+ "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.9.93 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
+ "modification_date": "2018-07-17T16:42:06.319315+00:00",
+ "name": "Debian Stretch",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
+ "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ }
]
"""
diff --git a/plugins/modules/scaleway_ip.py b/plugins/modules/scaleway_ip.py
index 2d51478159..ce9977bbf0 100644
--- a/plugins/modules/scaleway_ip.py
+++ b/plugins/modules/scaleway_ip.py
@@ -90,23 +90,24 @@ EXAMPLES = r"""
RETURN = r"""
data:
- description: This is only present when O(state=present).
- returned: when O(state=present)
- type: dict
- sample: {
+ description: This is only present when O(state=present).
+ returned: when O(state=present)
+ type: dict
+ sample:
+ {
"ips": [
{
- "organization": "951df375-e094-4d26-97c1-ba548eeb9c42",
- "reverse": null,
- "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477",
- "server": {
- "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1",
- "name": "ansible_tuto-1"
- },
- "address": "212.47.232.136"
+ "organization": "951df375-e094-4d26-97c1-ba548eeb9c42",
+ "reverse": null,
+ "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477",
+ "server": {
+ "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1",
+ "name": "ansible_tuto-1"
+ },
+ "address": "212.47.232.136"
}
- ]
- }
+ ]
+ }
"""
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
diff --git a/plugins/modules/scaleway_ip_info.py b/plugins/modules/scaleway_ip_info.py
index 7b8e1e6298..0812746619 100644
--- a/plugins/modules/scaleway_ip_info.py
+++ b/plugins/modules/scaleway_ip_info.py
@@ -52,22 +52,22 @@ RETURN = r"""
scaleway_ip_info:
description:
- Response from Scaleway API.
- - 'For more details please refer to U(https://developers.scaleway.com/en/products/instance/api/).'
+ - For more details please refer to U(https://developers.scaleway.com/en/products/instance/api/).
returned: success
type: list
elements: dict
sample:
- "scaleway_ip_info": [
- {
- "address": "163.172.170.243",
- "id": "ea081794-a581-8899-8451-386ddaf0a451",
- "organization": "3f709602-5e6c-4619-b80c-e324324324af",
- "reverse": null,
- "server": {
- "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
- "name": "scw-e0d158"
- }
+ [
+ {
+ "address": "163.172.170.243",
+ "id": "ea081794-a581-8899-8451-386ddaf0a451",
+ "organization": "3f709602-5e6c-4619-b80c-e324324324af",
+ "reverse": null,
+ "server": {
+ "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
}
+ }
]
"""
diff --git a/plugins/modules/scaleway_organization_info.py b/plugins/modules/scaleway_organization_info.py
index 603ab3cd4c..a28b290bbc 100644
--- a/plugins/modules/scaleway_organization_info.py
+++ b/plugins/modules/scaleway_organization_info.py
@@ -44,28 +44,28 @@ scaleway_organization_info:
type: list
elements: dict
sample:
- "scaleway_organization_info": [
- {
- "address_city_name": "Paris",
- "address_country_code": "FR",
- "address_line1": "42 Rue de l'univers",
- "address_line2": null,
- "address_postal_code": "75042",
- "address_subdivision_code": "FR-75",
- "creation_date": "2018-08-06T13:43:28.508575+00:00",
- "currency": "EUR",
- "customer_class": "individual",
- "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
- "locale": "fr_FR",
- "modification_date": "2018-08-06T14:56:41.401685+00:00",
- "name": "James Bond",
- "support_id": "694324",
- "support_level": "basic",
- "support_pin": "9324",
- "users": [],
- "vat_number": null,
- "warnings": []
- }
+ [
+ {
+ "address_city_name": "Paris",
+ "address_country_code": "FR",
+ "address_line1": "42 Rue de l'univers",
+ "address_line2": null,
+ "address_postal_code": "75042",
+ "address_subdivision_code": "FR-75",
+ "creation_date": "2018-08-06T13:43:28.508575+00:00",
+ "currency": "EUR",
+ "customer_class": "individual",
+ "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
+ "locale": "fr_FR",
+ "modification_date": "2018-08-06T14:56:41.401685+00:00",
+ "name": "James Bond",
+ "support_id": "694324",
+ "support_level": "basic",
+ "support_pin": "9324",
+ "users": [],
+ "vat_number": null,
+ "warnings": []
+ }
]
"""
diff --git a/plugins/modules/scaleway_private_network.py b/plugins/modules/scaleway_private_network.py
index 922a780098..edd5d435cb 100644
--- a/plugins/modules/scaleway_private_network.py
+++ b/plugins/modules/scaleway_private_network.py
@@ -89,26 +89,26 @@ EXAMPLES = r"""
RETURN = r"""
scaleway_private_network:
- description: Information on the VPC.
- returned: success when O(state=present)
- type: dict
- sample:
- {
- "created_at": "2022-01-15T11:11:12.676445Z",
- "id": "12345678-f1e6-40ec-83e5-12345d67ed89",
- "name": "network",
- "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
- "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
- "tags": [
- "tag1",
- "tag2",
- "tag3",
- "tag4",
- "tag5"
- ],
- "updated_at": "2022-01-15T11:12:04.624837Z",
- "zone": "fr-par-2"
- }
+ description: Information on the VPC.
+ returned: success when O(state=present)
+ type: dict
+ sample:
+ {
+ "created_at": "2022-01-15T11:11:12.676445Z",
+ "id": "12345678-f1e6-40ec-83e5-12345d67ed89",
+ "name": "network",
+ "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "tags": [
+ "tag1",
+ "tag2",
+ "tag3",
+ "tag4",
+ "tag5"
+ ],
+ "updated_at": "2022-01-15T11:12:04.624837Z",
+ "zone": "fr-par-2"
+ }
"""
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
diff --git a/plugins/modules/scaleway_security_group.py b/plugins/modules/scaleway_security_group.py
index 3e1a28275e..cb4e44c844 100644
--- a/plugins/modules/scaleway_security_group.py
+++ b/plugins/modules/scaleway_security_group.py
@@ -109,22 +109,23 @@ EXAMPLES = r"""
RETURN = r"""
data:
- description: This is only present when O(state=present).
- returned: when O(state=present)
- type: dict
- sample: {
- "scaleway_security_group": {
- "description": "my security group description",
- "enable_default_security": true,
- "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae",
- "inbound_default_policy": "accept",
- "name": "security_group",
- "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9",
- "organization_default": false,
- "outbound_default_policy": "accept",
- "servers": [],
- "stateful": false
- }
+ description: This is only present when O(state=present).
+ returned: when O(state=present)
+ type: dict
+ sample:
+ {
+ "scaleway_security_group": {
+ "description": "my security group description",
+ "enable_default_security": true,
+ "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae",
+ "inbound_default_policy": "accept",
+ "name": "security_group",
+ "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9",
+ "organization_default": false,
+ "outbound_default_policy": "accept",
+ "servers": [],
+ "stateful": false
+ }
}
"""
diff --git a/plugins/modules/scaleway_security_group_info.py b/plugins/modules/scaleway_security_group_info.py
index 6664938e09..4cdb295282 100644
--- a/plugins/modules/scaleway_security_group_info.py
+++ b/plugins/modules/scaleway_security_group_info.py
@@ -56,21 +56,21 @@ scaleway_security_group_info:
type: list
elements: dict
sample:
- "scaleway_security_group_info": [
- {
- "description": "test-ams",
- "enable_default_security": true,
- "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
- "name": "test-ams",
- "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
- "organization_default": false,
- "servers": [
- {
- "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
- "name": "scw-e0d158"
- }
- ]
- }
+ [
+ {
+ "description": "test-ams",
+ "enable_default_security": true,
+ "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
+ "name": "test-ams",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "organization_default": false,
+ "servers": [
+ {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ ]
+ }
]
"""
diff --git a/plugins/modules/scaleway_security_group_rule.py b/plugins/modules/scaleway_security_group_rule.py
index ec89d41f6c..f7f6304a26 100644
--- a/plugins/modules/scaleway_security_group_rule.py
+++ b/plugins/modules/scaleway_security_group_rule.py
@@ -116,21 +116,22 @@ EXAMPLES = r"""
RETURN = r"""
data:
- description: This is only present when O(state=present).
- returned: when O(state=present)
- type: dict
- sample: {
- "scaleway_security_group_rule": {
- "direction": "inbound",
- "protocol": "TCP",
- "ip_range": "0.0.0.0/0",
- "dest_port_from": 80,
- "action": "accept",
- "position": 2,
- "dest_port_to": null,
- "editable": null,
- "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9"
- }
+ description: This is only present when O(state=present).
+ returned: when O(state=present)
+ type: dict
+ sample:
+ {
+ "scaleway_security_group_rule": {
+ "direction": "inbound",
+ "protocol": "TCP",
+ "ip_range": "0.0.0.0/0",
+ "dest_port_from": 80,
+ "action": "accept",
+ "position": 2,
+ "dest_port_to": null,
+ "editable": null,
+ "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9"
+ }
}
"""
diff --git a/plugins/modules/scaleway_server_info.py b/plugins/modules/scaleway_server_info.py
index 39af47005e..327715d2db 100644
--- a/plugins/modules/scaleway_server_info.py
+++ b/plugins/modules/scaleway_server_info.py
@@ -57,103 +57,103 @@ scaleway_server_info:
type: list
elements: dict
sample:
- "scaleway_server_info": [
- {
- "arch": "x86_64",
- "boot_type": "local",
- "bootscript": {
- "architecture": "x86_64",
- "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
- "default": true,
- "dtb": "",
- "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
- "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
- "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
- "organization": "11111111-1111-4111-8111-111111111111",
- "public": true,
- "title": "x86_64 mainline 4.4.127 rev1"
- },
- "commercial_type": "START1-XS",
+ [
+ {
+ "arch": "x86_64",
+ "boot_type": "local",
+ "bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "commercial_type": "START1-XS",
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "dynamic_ip_required": false,
+ "enable_ipv6": false,
+ "extra_networks": [],
+ "hostname": "scw-e0d256",
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "image": {
+ "arch": "x86_64",
+ "creation_date": "2018-04-26T12:42:21.619844+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
+ "modification_date": "2018-04-26T12:49:07.573004+00:00",
+ "name": "Ubuntu Xenial",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ },
+ "ipv6": null,
+ "location": {
+ "cluster_id": "5",
+ "hypervisor_id": "412",
+ "node_id": "2",
+ "platform_id": "13",
+ "zone_id": "par1"
+ },
+ "maintenances": [],
+ "modification_date": "2018-08-14T21:37:28.630882+00:00",
+ "name": "scw-e0d256",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "private_ip": "10.14.222.131",
+ "protected": false,
+ "public_ip": {
+ "address": "163.172.170.197",
+ "dynamic": false,
+ "id": "ea081794-a581-4495-8451-386ddaf0a451"
+ },
+ "security_group": {
+ "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
+ "name": "Default security group"
+ },
+ "state": "running",
+ "state_detail": "booted",
+ "tags": [],
+ "volumes": {
+ "0": {
"creation_date": "2018-08-14T21:36:56.271545+00:00",
- "dynamic_ip_required": false,
- "enable_ipv6": false,
- "extra_networks": [],
- "hostname": "scw-e0d256",
- "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
- "image": {
- "arch": "x86_64",
- "creation_date": "2018-04-26T12:42:21.619844+00:00",
- "default_bootscript": {
- "architecture": "x86_64",
- "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
- "default": true,
- "dtb": "",
- "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
- "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
- "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
- "organization": "11111111-1111-4111-8111-111111111111",
- "public": true,
- "title": "x86_64 mainline 4.4.127 rev1"
- },
- "extra_volumes": [],
- "from_server": null,
- "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
- "modification_date": "2018-04-26T12:49:07.573004+00:00",
- "name": "Ubuntu Xenial",
- "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
- "public": true,
- "root_volume": {
- "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
- "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
- "size": 25000000000,
- "volume_type": "l_ssd"
- },
- "state": "available"
- },
- "ipv6": null,
- "location": {
- "cluster_id": "5",
- "hypervisor_id": "412",
- "node_id": "2",
- "platform_id": "13",
- "zone_id": "par1"
- },
- "maintenances": [],
- "modification_date": "2018-08-14T21:37:28.630882+00:00",
- "name": "scw-e0d256",
+ "export_uri": "device://dev/vda",
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "modification_date": "2018-08-14T21:36:56.271545+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
"organization": "3f709602-5e6c-4619-b80c-e841c89734af",
- "private_ip": "10.14.222.131",
- "protected": false,
- "public_ip": {
- "address": "163.172.170.197",
- "dynamic": false,
- "id": "ea081794-a581-4495-8451-386ddaf0a451"
+ "server": {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d256"
},
- "security_group": {
- "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
- "name": "Default security group"
- },
- "state": "running",
- "state_detail": "booted",
- "tags": [],
- "volumes": {
- "0": {
- "creation_date": "2018-08-14T21:36:56.271545+00:00",
- "export_uri": "device://dev/vda",
- "id": "68386fae-4f55-4fbf-aabb-953036a85872",
- "modification_date": "2018-08-14T21:36:56.271545+00:00",
- "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
- "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
- "server": {
- "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
- "name": "scw-e0d256"
- },
- "size": 25000000000,
- "state": "available",
- "volume_type": "l_ssd"
- }
- }
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
}
+ }
]
"""
diff --git a/plugins/modules/scaleway_snapshot_info.py b/plugins/modules/scaleway_snapshot_info.py
index 6b932cced2..ead1826aa4 100644
--- a/plugins/modules/scaleway_snapshot_info.py
+++ b/plugins/modules/scaleway_snapshot_info.py
@@ -57,20 +57,20 @@ scaleway_snapshot_info:
type: list
elements: dict
sample:
- "scaleway_snapshot_info": [
+ [
{
- "base_volume": {
- "id": "68386fae-4f55-4fbf-aabb-953036a85872",
- "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
- },
- "creation_date": "2018-08-14T22:34:35.299461+00:00",
- "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
- "modification_date": "2018-08-14T22:34:54.520560+00:00",
- "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
- "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
- "size": 25000000000,
- "state": "available",
- "volume_type": "l_ssd"
+ "base_volume": {
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
+ },
+ "creation_date": "2018-08-14T22:34:35.299461+00:00",
+ "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
+ "modification_date": "2018-08-14T22:34:54.520560+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
}
]
"""
diff --git a/plugins/modules/scaleway_sshkey.py b/plugins/modules/scaleway_sshkey.py
index 37e8ec8c3b..a8ccc155e1 100644
--- a/plugins/modules/scaleway_sshkey.py
+++ b/plugins/modules/scaleway_sshkey.py
@@ -71,13 +71,16 @@ EXAMPLES = r"""
RETURN = r"""
data:
- description: This is only present when O(state=present).
- returned: when O(state=present)
- type: dict
- sample: {
- "ssh_public_keys": [
- {"key": "ssh-rsa AAAA...."}
- ]
+ description: This is only present when O(state=present).
+ returned: when O(state=present)
+ type: dict
+ sample:
+ {
+ "ssh_public_keys": [
+ {
+ "key": "ssh-rsa AAAA...."
+ }
+ ]
}
"""
diff --git a/plugins/modules/scaleway_volume.py b/plugins/modules/scaleway_volume.py
index ed6a506742..c7c6346075 100644
--- a/plugins/modules/scaleway_volume.py
+++ b/plugins/modules/scaleway_volume.py
@@ -95,10 +95,11 @@ EXAMPLES = r"""
RETURN = r"""
data:
- description: This is only present when O(state=present).
- returned: when O(state=present)
- type: dict
- sample: {
+ description: This is only present when O(state=present).
+ returned: when O(state=present)
+ type: dict
+ sample:
+ {
"volume": {
"export_uri": null,
"id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
@@ -107,8 +108,8 @@ data:
"server": null,
"size": 10000000000,
"volume_type": "l_ssd"
- }
-}
+ }
+ }
"""
from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
diff --git a/plugins/modules/scaleway_volume_info.py b/plugins/modules/scaleway_volume_info.py
index 1b2e95f88c..8a4986a724 100644
--- a/plugins/modules/scaleway_volume_info.py
+++ b/plugins/modules/scaleway_volume_info.py
@@ -57,19 +57,19 @@ scaleway_volume_info:
type: list
elements: dict
sample:
- "scaleway_volume_info": [
- {
- "creation_date": "2018-08-14T20:56:24.949660+00:00",
- "export_uri": null,
- "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
- "modification_date": "2018-08-14T20:56:24.949660+00:00",
- "name": "test-volume",
- "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
- "server": null,
- "size": 50000000000,
- "state": "available",
- "volume_type": "l_ssd"
- }
+ [
+ {
+ "creation_date": "2018-08-14T20:56:24.949660+00:00",
+ "export_uri": null,
+ "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
+ "modification_date": "2018-08-14T20:56:24.949660+00:00",
+ "name": "test-volume",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": null,
+ "size": 50000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
]
"""
diff --git a/plugins/modules/selinux_permissive.py b/plugins/modules/selinux_permissive.py
index b5c0ee4a61..c6107309ac 100644
--- a/plugins/modules/selinux_permissive.py
+++ b/plugins/modules/selinux_permissive.py
@@ -24,7 +24,7 @@ attributes:
options:
domain:
description:
- - The domain that will be added or removed from the list of permissive domains.
+ - The domain that is added or removed from the list of permissive domains.
type: str
required: true
aliases: [name]
diff --git a/plugins/modules/selogin.py b/plugins/modules/selogin.py
index 8f1b20c230..408d9221da 100644
--- a/plugins/modules/selogin.py
+++ b/plugins/modules/selogin.py
@@ -34,7 +34,8 @@ options:
type: str
aliases: [serange]
description:
- - MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user record range.
+ - MLS/MCS Security Range (MLS/MCS Systems only) SELinux Range for SELinux login mapping defaults to the SELinux user
+ record range.
default: s0
state:
type: str
diff --git a/plugins/modules/sendgrid.py b/plugins/modules/sendgrid.py
index 1099f579e1..c0e4b239bc 100644
--- a/plugins/modules/sendgrid.py
+++ b/plugins/modules/sendgrid.py
@@ -17,8 +17,8 @@ description:
notes:
- This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that
the module fails.
- - Like the other notification modules, this one requires an external dependency to work. In this case, you will need an
- active SendGrid account.
+ - Like the other notification modules, this one requires an external dependency to work. In this case, you need an active
+ SendGrid account.
- In order to use O(api_key), O(cc), O(bcc), O(attachments), O(from_name), O(html_body), and O(headers) you must C(pip install
sendgrid).
requirements:
@@ -82,7 +82,7 @@ options:
- The name you want to appear in the from field, for example V(John Doe).
html_body:
description:
- - Whether the body is html content that should be rendered.
+ - Whether the body is HTML content that should be rendered.
type: bool
default: false
headers:
@@ -214,19 +214,19 @@ def post_sendgrid_api(module, username, password, from_address, to_addresses,
def main():
module = AnsibleModule(
argument_spec=dict(
- username=dict(required=False),
- password=dict(required=False, no_log=True),
- api_key=dict(required=False, no_log=True),
- bcc=dict(required=False, type='list', elements='str'),
- cc=dict(required=False, type='list', elements='str'),
- headers=dict(required=False, type='dict'),
+ username=dict(),
+ password=dict(no_log=True),
+ api_key=dict(no_log=True),
+ bcc=dict(type='list', elements='str'),
+ cc=dict(type='list', elements='str'),
+ headers=dict(type='dict'),
from_address=dict(required=True),
- from_name=dict(required=False),
+ from_name=dict(),
to_addresses=dict(required=True, type='list', elements='str'),
subject=dict(required=True),
body=dict(required=True),
- html_body=dict(required=False, default=False, type='bool'),
- attachments=dict(required=False, type='list', elements='path')
+ html_body=dict(default=False, type='bool'),
+ attachments=dict(type='list', elements='path')
),
supports_check_mode=True,
mutually_exclusive=[
diff --git a/plugins/modules/sensu_check.py b/plugins/modules/sensu_check.py
index 2cac434923..a4b5771528 100644
--- a/plugins/modules/sensu_check.py
+++ b/plugins/modules/sensu_check.py
@@ -14,7 +14,7 @@ module: sensu_check
short_description: Manage Sensu checks
description:
- Manage the checks that should be run on a machine by I(Sensu).
- - Most options do not have a default and will not be added to the check definition unless specified.
+ - Most options do not have a default and are not added to the check definition unless specified.
- All defaults except O(path), O(state), O(backup) and O(metric) are not managed by this module, they are simply specified
for your convenience.
deprecated:
@@ -45,8 +45,8 @@ options:
type: str
description:
- Path to the JSON file of the check to be added/removed.
- - Will be created if it does not exist (unless O(state=absent)).
- - The parent folders need to exist when O(state=present), otherwise an error will be thrown.
+ - It is created if it does not exist (unless O(state=absent)).
+ - The parent folders need to exist when O(state=present), otherwise an error is thrown.
default: /etc/sensu/conf.d/checks.json
backup:
description:
@@ -99,7 +99,8 @@ options:
type: list
elements: str
description:
- - Other checks this check depends on, if dependencies fail handling of this check will be disabled.
+ - Other checks this one depends on.
+ - If dependencies fail handling of this check is disabled.
metric:
description:
- Whether the check is a metric.
diff --git a/plugins/modules/sensu_client.py b/plugins/modules/sensu_client.py
index 955a25f44f..f87621bd6d 100644
--- a/plugins/modules/sensu_client.py
+++ b/plugins/modules/sensu_client.py
@@ -14,7 +14,7 @@ author: "David Moreau Simard (@dmsimard)"
short_description: Manages Sensu client configuration
description:
- Manages Sensu client configuration.
- - 'For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/clients.html).'
+ - For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/clients.html).
deprecated:
removed_in: 13.0.0
why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
@@ -42,8 +42,8 @@ options:
type: str
description:
- An address to help identify and reach the client. This is only informational, usually an IP address or hostname.
- - If not specified it defaults to non-loopback IPv4 address as determined by Ruby C(Socket.ip_address_list) (provided by
- Sensu).
+ - If not specified it defaults to non-loopback IPv4 address as determined by Ruby C(Socket.ip_address_list) (provided
+ by Sensu).
subscriptions:
type: list
elements: str
@@ -158,7 +158,13 @@ config:
description: Effective client configuration, when state is present.
returned: success
type: dict
- sample: {'name': 'client', 'subscriptions': ['default']}
+ sample:
+ {
+ "name": "client",
+ "subscriptions": [
+ "default"
+ ]
+ }
file:
description: Path to the client configuration file.
returned: success
diff --git a/plugins/modules/sensu_handler.py b/plugins/modules/sensu_handler.py
index ff4a77a6ff..5b5494bf1c 100644
--- a/plugins/modules/sensu_handler.py
+++ b/plugins/modules/sensu_handler.py
@@ -14,7 +14,7 @@ author: "David Moreau Simard (@dmsimard)"
short_description: Manages Sensu handler configuration
description:
- Manages Sensu handler configuration.
- - 'For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/handlers.html).'
+ - For more information, refer to the L(Sensu documentation, https://sensuapp.org/docs/latest/reference/handlers.html).
deprecated:
removed_in: 13.0.0
why: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20.
@@ -57,7 +57,7 @@ options:
type: list
elements: str
description:
- - An array of check result severities the handler will handle.
+ - An array of check result severities the handler handles.
- 'NOTE: event resolution bypasses this filtering.'
- "Example: [ 'warning', 'critical', 'unknown' ]."
mutator:
@@ -155,7 +155,12 @@ config:
description: Effective handler configuration, when state is present.
returned: success
type: dict
- sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'}
+ sample:
+ {
+ "name": "irc",
+ "type": "pipe",
+ "command": "/usr/local/bin/notify-irc.sh"
+ }
file:
description: Path to the handler configuration file.
returned: success
diff --git a/plugins/modules/sensu_silence.py b/plugins/modules/sensu_silence.py
index a6d699f4c1..91e6f63496 100644
--- a/plugins/modules/sensu_silence.py
+++ b/plugins/modules/sensu_silence.py
@@ -38,10 +38,10 @@ options:
expire:
type: int
description:
- - If specified, the silence entry will be automatically cleared after this number of seconds.
+ - If specified, the silence entry is automatically cleared after this number of seconds.
expire_on_resolve:
description:
- - If specified as true, the silence entry will be automatically cleared once the condition it is silencing is resolved.
+ - If specified as true, the silence entry is automatically cleared once the condition it is silencing is resolved.
type: bool
reason:
type: str
@@ -202,7 +202,7 @@ def create(
expire_on_resolve, reason, subscription):
(rc, out, changed) = query(module, url, check, subscription)
for i in out:
- if (i['subscription'] == subscription):
+ if i['subscription'] == subscription:
if (
(check is None or check == i['check']) and
(
@@ -265,14 +265,14 @@ def create(
def main():
module = AnsibleModule(
argument_spec=dict(
- check=dict(required=False),
- creator=dict(required=False),
- expire=dict(type='int', required=False),
- expire_on_resolve=dict(type='bool', required=False),
- reason=dict(required=False),
+ check=dict(),
+ creator=dict(),
+ expire=dict(type='int'),
+ expire_on_resolve=dict(type='bool'),
+ reason=dict(),
state=dict(default='present', choices=['present', 'absent']),
subscription=dict(required=True),
- url=dict(required=False, default='http://127.0.01:4567'),
+ url=dict(default='http://127.0.01:4567'),
),
supports_check_mode=True
)
diff --git a/plugins/modules/serverless.py b/plugins/modules/serverless.py
index 937f7dcdea..8bba307440 100644
--- a/plugins/modules/serverless.py
+++ b/plugins/modules/serverless.py
@@ -51,7 +51,7 @@ options:
deploy:
description:
- Whether or not to deploy artifacts after building them.
- - When this option is V(false) all the functions will be built, but no stack update will be run to send them out.
+ - When this option is V(false) all the functions are built, but no stack update is run to send them out.
- This is mostly useful for generating artifacts to be stored/deployed elsewhere.
type: bool
default: true
diff --git a/plugins/modules/simpleinit_msb.py b/plugins/modules/simpleinit_msb.py
index f4b017a410..ca1371653c 100644
--- a/plugins/modules/simpleinit_msb.py
+++ b/plugins/modules/simpleinit_msb.py
@@ -17,8 +17,6 @@ short_description: Manage services on Source Mage GNU/Linux
version_added: 7.5.0
description:
- Controls services on remote hosts using C(simpleinit-msb).
-notes:
- - This module needs ansible-core 2.15.5 or newer. Older versions have a broken and insufficient daemonize functionality.
author: "Vlad Glagolev (@vaygr)"
extends_documentation_fragment:
- community.general.attributes
diff --git a/plugins/modules/sl_vm.py b/plugins/modules/sl_vm.py
index c5a986843c..8b199f5698 100644
--- a/plugins/modules/sl_vm.py
+++ b/plugins/modules/sl_vm.py
@@ -173,8 +173,16 @@ options:
type: int
requirements:
- softlayer >= 4.1.1
+notes:
+ - If using Python 2.7, you must install C(softlayer-python<=5.7.2).
+ - If using Python 3.6, you must install C(softlayer-python<=6.0.0).
+ - The C(softlayer-python) library, at version 6.2.6 (from Jan 2025), only supports Python version 3.8, 3.9 and 3.10.
author:
- Matt Colton (@mcltn)
+seealso:
+ - name: SoftLayer API Python Client
+ description: The SoftLayer API Python Client is required for this module.
+ link: https://github.com/SoftLayer/softlayer-python
"""
EXAMPLES = r"""
@@ -267,7 +275,7 @@ EXAMPLES = r"""
"""
# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
-RETURN = """# """
+RETURN = """#"""
import json
import time
@@ -311,9 +319,9 @@ def create_virtual_instance(module):
return False, None
# Check if OS or Image Template is provided (Can't be both, defaults to OS)
- if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
+ if module.params.get('os_code') is not None and module.params.get('os_code') != '':
module.params['image_id'] = ''
- elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
+ elif module.params.get('image_id') is not None and module.params.get('image_id') != '':
module.params['os_code'] = ''
module.params['disks'] = [] # Blank out disks since it will use the template
else:
diff --git a/plugins/modules/slack.py b/plugins/modules/slack.py
index 61ccfbfc9e..e009320d85 100644
--- a/plugins/modules/slack.py
+++ b/plugins/modules/slack.py
@@ -32,21 +32,24 @@ options:
domain:
type: str
description:
- - Slack (sub)domain for your environment without protocol. (For example V(example.slack.com).) In Ansible 1.8 and beyond,
- this is deprecated and may be ignored. See token documentation for information.
+ - "When using new format 'Webhook token' and WebAPI tokens: this can be V(slack.com) or V(slack-gov.com) and is ignored
+ otherwise."
+ - "When using old format 'Webhook token': Slack (sub)domain for your environment without protocol. (For example V(example.slack.com).)
+ in Ansible 1.8 and beyond, this is deprecated and may be ignored. See token documentation for information."
token:
type: str
description:
- - Slack integration token. This authenticates you to the slack service. Make sure to use the correct type of token,
+ - Slack integration token. This authenticates you to the Slack service. Make sure to use the correct type of token,
depending on what method you use.
- 'Webhook token: Prior to Ansible 1.8, a token looked like V(3Ffe373sfhRE6y42Fg3rvf4GlK). In Ansible 1.8 and above,
- Ansible adapts to the new slack API where tokens look like V(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens
- are in the new format then slack will ignore any value of domain. If the token is in the old format the domain is
- required. Ansible has no control of when slack will get rid of the old API. When slack does that the old format will
- stop working. ** Please keep in mind the tokens are not the API tokens but are the webhook tokens. In slack these
- are found in the webhook URL which are obtained under the apps and integrations. The incoming webhooks can be added
- in that area. In some cases this may be locked by your Slack admin and you must request access. It is there that the
- incoming webhooks can be added. The key is on the end of the URL given to you in that section.'
+ Ansible adapts to the new Slack API where tokens look like V(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If tokens
+ are in the new format then Slack ignores any value of domain except V(slack.com) or V(slack-gov.com). If the token
+ is in the old format the domain is required. Ansible has no control of when Slack is going to remove the old API.
+ When Slack does that the old format is going to cease working. B(Please keep in mind the tokens are not the API tokens
+ but are the webhook tokens.) In Slack these are found in the webhook URL which are obtained under the apps and integrations.
+ The incoming webhooks can be added in that area. In some cases this may be locked by your Slack admin and you must
+ request access. It is there that the incoming webhooks can be added. The key is on the end of the URL given to you
+ in that section.'
- "WebAPI token: Slack WebAPI requires a personal, bot or work application token. These tokens start with V(xoxp-),
V(xoxb-) or V(xoxa-), for example V(xoxb-1234-56789abcdefghijklmnop). WebAPI token is required if you intend to receive
thread_id. See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information."
@@ -56,7 +59,8 @@ options:
description:
- Message to send. Note that the module does not handle escaping characters. Plain-text angle brackets and ampersands
should be converted to HTML entities (for example C(&) to C(&amp;)) before sending. See Slack's documentation
- (U(https://api.slack.com/docs/message-formatting)) for more.
+ (U(https://api.slack.com/docs/message-formatting))
+ for more.
channel:
type: str
description:
@@ -88,7 +92,7 @@ options:
type: str
description:
- Emoji for the message sender. See Slack documentation for options.
- - If O(icon_emoji) is set, O(icon_url) will not be used.
+ - If O(icon_emoji) is set, O(icon_url) is not used.
link_names:
type: int
description:
@@ -106,8 +110,8 @@ options:
- 'none'
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
type: bool
default: true
color:
@@ -137,11 +141,12 @@ options:
- Setting for automatically prepending a V(#) symbol on the passed in O(channel).
- The V(auto) method prepends a V(#) unless O(channel) starts with one of V(#), V(@), V(C0), V(GF), V(G0), V(CP). These
prefixes only cover a small set of the prefixes that should not have a V(#) prepended. Since an exact condition which
- O(channel) values must not have the V(#) prefix is not known, the value V(auto) for this option will be deprecated
- in the future. It is best to explicitly set O(prepend_hash=always) or O(prepend_hash=never) to obtain the needed behavior.
- - The B(current default) is V(auto), which has been B(deprecated) since community.general 10.2.0. It will change to
- V(never) in community.general 12.0.0. To prevent deprecation warnings you can explicitly set O(prepend_hash) to the
- value you want. We suggest to only use V(always) or V(never), but not V(auto), when explicitly setting a value.
+ O(channel) values must not have the V(#) prefix is not known, the value V(auto) for this option is deprecated in the
+ future. It is best to explicitly set O(prepend_hash=always) or O(prepend_hash=never) to obtain the needed behavior.
+ - The B(current default) is V(auto), which has been B(deprecated) since community.general 10.2.0. It is going to change
+ to V(never) in community.general 12.0.0. To prevent deprecation warnings you can explicitly set O(prepend_hash) to
+ the value you want. We suggest to only use V(always) or V(never), but not V(auto), when explicitly setting a value.
+ # when the default changes in community.general 12.0.0, add deprecation for the `auto` value for 14.0.0
choices:
- 'always'
- 'never'
@@ -267,10 +272,10 @@ from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import fetch_url
OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
-SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
-SLACK_POSTMESSAGE_WEBAPI = 'https://slack.com/api/chat.postMessage'
-SLACK_UPDATEMESSAGE_WEBAPI = 'https://slack.com/api/chat.update'
-SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://slack.com/api/conversations.history'
+SLACK_INCOMING_WEBHOOK = 'https://hooks.%s/services/%s'
+SLACK_POSTMESSAGE_WEBAPI = 'https://%s/api/chat.postMessage'
+SLACK_UPDATEMESSAGE_WEBAPI = 'https://%s/api/chat.update'
+SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://%s/api/conversations.history'
# Escaping quotes and apostrophes to avoid ending string prematurely in ansible call.
# We do not escape other characters used as Slack metacharacters (e.g. &, <, >).
@@ -372,7 +377,11 @@ def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_e
return payload
-def get_slack_message(module, token, channel, ts):
+def validate_slack_domain(domain):
+ return (domain if domain in ('slack.com', 'slack-gov.com') else 'slack.com')
+
+
+def get_slack_message(module, domain, token, channel, ts):
headers = {
'Content-Type': 'application/json; charset=UTF-8',
'Accept': 'application/json',
@@ -384,7 +393,8 @@ def get_slack_message(module, token, channel, ts):
'limit': 1,
'inclusive': 'true',
})
- url = SLACK_CONVERSATIONS_HISTORY_WEBAPI + '?' + qs
+ domain = validate_slack_domain(domain)
+ url = (SLACK_CONVERSATIONS_HISTORY_WEBAPI % domain) + '?' + qs
response, info = fetch_url(module=module, url=url, headers=headers, method='GET')
if info['status'] != 200:
module.fail_json(msg="failed to get slack message")
@@ -402,9 +412,11 @@ def do_notify_slack(module, domain, token, payload):
use_webapi = False
if token.count('/') >= 2:
# New style webhook token
- slack_uri = SLACK_INCOMING_WEBHOOK % token
+ domain = validate_slack_domain(domain)
+ slack_uri = SLACK_INCOMING_WEBHOOK % (domain, token)
elif re.match(r'^xox[abp]-\S+$', token):
- slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI
+ domain = validate_slack_domain(domain)
+ slack_uri = (SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI) % domain
use_webapi = True
else:
if not domain:
@@ -426,7 +438,7 @@ def do_notify_slack(module, domain, token, payload):
if use_webapi:
obscured_incoming_webhook = slack_uri
else:
- obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % '[obscured]'
+ obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]')
module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg']))
# each API requires different handling
@@ -494,7 +506,7 @@ def main():
# if updating an existing message, we can check if there's anything to update
if message_id is not None:
changed = False
- msg = get_slack_message(module, token, channel, message_id)
+ msg = get_slack_message(module, domain, token, channel, message_id)
for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'):
if msg.get(key) != module.params.get(key):
changed = True
diff --git a/plugins/modules/smartos_image_info.py b/plugins/modules/smartos_image_info.py
index 19ad740b72..89c00f5c26 100644
--- a/plugins/modules/smartos_image_info.py
+++ b/plugins/modules/smartos_image_info.py
@@ -47,12 +47,20 @@ EXAMPLES = r"""
- name: Print information
ansible.builtin.debug:
- msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }} has {{ result.smartos_images[item]['clones'] }} VM(s)"
+ msg: >-
+ {{
+ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+ has {{ result.smartos_images[item]['clones']
+ }} VM(s)
with_items: "{{ result.smartos_images.keys() | list }}"
- name: Print information
ansible.builtin.debug:
- msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }} has {{ smartos_images[item]['clones'] }} VM(s)"
+ msg: >-
+ {{
+ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones']
+ }} VM(s)
with_items: "{{ smartos_images.keys() | list }}"
"""
diff --git a/plugins/modules/snap.py b/plugins/modules/snap.py
index 29fd08394f..fd424e0dd9 100644
--- a/plugins/modules/snap.py
+++ b/plugins/modules/snap.py
@@ -37,8 +37,8 @@ options:
state:
description:
- Desired state of the package.
- - When O(state=present) the module will use C(snap install) if the snap is not installed, and C(snap refresh) if it
- is installed but from a different channel.
+ - When O(state=present) the module uses C(snap install) if the snap is not installed, and C(snap refresh) if it is installed
+ but from a different channel.
default: present
choices: [absent, present, enabled, disabled]
type: str
@@ -56,19 +56,19 @@ options:
description:
- Define which release of a snap is installed and tracked for updates. This option can only be specified if there is
a single snap in the task.
- - If not passed, the C(snap) command will default to V(stable).
- - If the value passed does not contain the C(track), it will default to C(latest). For example, if V(edge) is passed,
- the module will assume the channel to be V(latest/edge).
+ - If not passed, the C(snap) command defaults to V(stable).
+ - If the value passed does not contain the C(track), it defaults to C(latest). For example, if V(edge) is passed, the
+ module assumes the channel to be V(latest/edge).
- See U(https://snapcraft.io/docs/channels) for more details about snap channels.
type: str
required: false
options:
description:
- - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option will be applied to
- that snap only. If the snap name is omitted, the options will be applied to all snaps listed in O(name). Options will
- only be applied to active snaps.
- - Options will only be applied when C(state) is set to V(present). This is done after the necessary installation or
- refresh (upgrade/downgrade) of all the snaps listed in O(name).
+ - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option is applied to that
+ snap only. If the snap name is omitted, the options are applied to all snaps listed in O(name). Options are only applied
+ to active snaps.
+ - Options are only applied when C(state) is set to V(present). This is done after the necessary installation or refresh
+ (upgrade/downgrade) of all the snaps listed in O(name).
- See U(https://snapcraft.io/docs/configuration-in-snaps) for more details about snap configuration options.
required: false
type: list
@@ -203,7 +203,6 @@ class Snap(StateModuleHelper):
},
supports_check_mode=True,
)
- use_old_vardict = False
@staticmethod
def _first_non_zero(a):
diff --git a/plugins/modules/snap_alias.py b/plugins/modules/snap_alias.py
index b7244ed74d..3837f2b5a6 100644
--- a/plugins/modules/snap_alias.py
+++ b/plugins/modules/snap_alias.py
@@ -109,7 +109,6 @@ class SnapAlias(StateModuleHelper):
],
supports_check_mode=True,
)
- use_old_vardict = False
def _aliases(self):
n = self.vars.name
diff --git a/plugins/modules/snmp_facts.py b/plugins/modules/snmp_facts.py
index 74aca27c40..17c7bbd032 100644
--- a/plugins/modules/snmp_facts.py
+++ b/plugins/modules/snmp_facts.py
@@ -15,7 +15,7 @@ author:
- Patrick Ogenstad (@ogenstad)
short_description: Retrieve facts for a device using SNMP
description:
- - Retrieve facts for a device using SNMP, the facts will be inserted to the C(ansible_facts) key.
+ - Retrieve facts for a device using SNMP, the facts are inserted to the C(ansible_facts) key.
requirements:
- pysnmp
extends_documentation_fragment:
@@ -113,7 +113,7 @@ ansible_sysdescr:
description: A textual description of the entity.
returned: success
type: str
- sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64
+ sample: "Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64"
ansible_sysobjectid:
description: The vendor's authoritative identification of the network management subsystem contained in the entity.
returned: success
@@ -149,40 +149,41 @@ ansible_interfaces:
description: Dictionary of each network interface and its metadata.
returned: success
type: dict
- sample: {
- "1": {
- "adminstatus": "up",
- "description": "",
- "ifindex": "1",
- "ipv4": [
- {
- "address": "127.0.0.1",
- "netmask": "255.0.0.0"
- }
- ],
- "mac": "",
- "mtu": "65536",
- "name": "lo",
- "operstatus": "up",
- "speed": "65536"
- },
- "2": {
- "adminstatus": "up",
- "description": "",
- "ifindex": "2",
- "ipv4": [
- {
- "address": "192.168.213.128",
- "netmask": "255.255.255.0"
- }
- ],
- "mac": "000a305a52a1",
- "mtu": "1500",
- "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)",
- "operstatus": "up",
- "speed": "1500"
+ sample:
+ {
+ "1": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "1",
+ "ipv4": [
+ {
+ "address": "127.0.0.1",
+ "netmask": "255.0.0.0"
+ }
+ ],
+ "mac": "",
+ "mtu": "65536",
+ "name": "lo",
+ "operstatus": "up",
+ "speed": "65536"
+ },
+ "2": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "2",
+ "ipv4": [
+ {
+ "address": "192.168.213.128",
+ "netmask": "255.255.255.0"
+ }
+ ],
+ "mac": "000a305a52a1",
+ "mtu": "1500",
+ "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)",
+ "operstatus": "up",
+ "speed": "1500"
+ }
}
- }
"""
import binascii
diff --git a/plugins/modules/solaris_zone.py b/plugins/modules/solaris_zone.py
index 31e7919c08..431e0cb31d 100644
--- a/plugins/modules/solaris_zone.py
+++ b/plugins/modules/solaris_zone.py
@@ -51,7 +51,7 @@ options:
required: true
path:
description:
- - The path where the zone will be created. This is required when the zone is created, but not used otherwise.
+ - The path where the zone is created. This is required when the zone is created, but not used otherwise.
type: str
sparse:
description:
@@ -60,7 +60,7 @@ options:
default: false
root_password:
description:
- - The password hash for the root account. If not specified, the zone's root account will not have a password.
+ - The password hash for the root account. If not specified, the zone's root account does not have a password.
type: str
config:
description:
diff --git a/plugins/modules/sorcery.py b/plugins/modules/sorcery.py
index fff3f55e07..de50741185 100644
--- a/plugins/modules/sorcery.py
+++ b/plugins/modules/sorcery.py
@@ -34,7 +34,7 @@ options:
description:
- Name of the spell or grimoire.
- Multiple names can be given, separated by commas.
- - Special value V(*) in conjunction with states V(latest) or V(rebuild) will update or rebuild the whole system respectively.
+ - Special value V(*) in conjunction with states V(latest) or V(rebuild) updates or rebuilds the whole system respectively.
- The alias O(grimoire) was added in community.general 7.3.0.
aliases: ["spell", "grimoire"]
type: list
@@ -44,7 +44,7 @@ options:
description:
- Repository location.
- If specified, O(name) represents grimoire(s) instead of spell(s).
- - Special value V(*) will pull grimoire from the official location.
+ - Special value V(*) pulls grimoire from the official location.
- Only single item in O(name) in conjunction with V(*) can be used.
- O(state=absent) must be used with a special value V(*).
type: str
@@ -697,11 +697,11 @@ def manage_spells(module):
def main():
module = AnsibleModule(
argument_spec=dict(
- name=dict(default=None, aliases=['spell', 'grimoire'], type='list', elements='str'),
- repository=dict(default=None, type='str'),
+ name=dict(aliases=['spell', 'grimoire'], type='list', elements='str'),
+ repository=dict(type='str'),
state=dict(default='present', choices=['present', 'latest',
'absent', 'cast', 'dispelled', 'rebuild']),
- depends=dict(default=None),
+ depends=dict(),
update=dict(default=False, type='bool'),
update_cache=dict(default=False, aliases=['update_codex'], type='bool'),
cache_valid_time=dict(default=0, type='int')
diff --git a/plugins/modules/spectrum_device.py b/plugins/modules/spectrum_device.py
index 8bf4aa41b5..54cddbffb0 100644
--- a/plugins/modules/spectrum_device.py
+++ b/plugins/modules/spectrum_device.py
@@ -30,7 +30,7 @@ options:
required: true
description:
- IP address of the device.
- - If a hostname is given, it will be resolved to the IP address.
+ - If a hostname is given, it is resolved to the IP address.
community:
type: str
description:
@@ -69,13 +69,13 @@ options:
- Oneclick user password.
use_proxy:
description:
- - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
+ - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts.
default: true
type: bool
validate_certs:
description:
- - If V(false), SSL certificates will not be validated. This should only be used on personally controlled sites using
- self-signed certificates.
+ - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed
+ certificates.
default: true
type: bool
agentport:
@@ -85,8 +85,8 @@ options:
- UDP port used for SNMP discovery.
default: 161
notes:
- - The devices will be created inside the I(Universe) container of the specified landscape.
- - All the operations will be performed only on the specified landscape.
+ - The devices are created inside the I(Universe) container of the specified landscape.
+ - All the operations are performed only on the specified landscape.
"""
EXAMPLES = r"""
@@ -119,7 +119,12 @@ device:
description: Device data when O(state=present).
returned: success
type: dict
- sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'}
+ sample:
+ {
+ "model_handle": "0x1007ab",
+ "landscape": "0x100000",
+ "address": "10.10.5.1"
+ }
"""
from socket import gethostbyname, gaierror
diff --git a/plugins/modules/spectrum_model_attrs.py b/plugins/modules/spectrum_model_attrs.py
index 9c9fba4deb..53cae10b74 100644
--- a/plugins/modules/spectrum_model_attrs.py
+++ b/plugins/modules/spectrum_model_attrs.py
@@ -47,7 +47,7 @@ options:
aliases: [password]
use_proxy:
description:
- - If V(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
+ - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts.
default: true
required: false
type: bool
@@ -99,7 +99,7 @@ options:
- C(sysName) (C(0x10b5b));
- C(Vendor_Name) (C(0x11570));
- C(Description) (C(0x230017)).
- - Hex IDs are the direct identifiers in Spectrum and will always work.
+ - Hex IDs are the direct identifiers in Spectrum and always work.
- 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> <model name> -> Attributes tab.'
type: str
required: true
@@ -123,7 +123,9 @@ EXAMPLES = r"""
- name: "isManaged"
value: "false"
- name: "Notes"
- value: "MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} by {{ tower_user_name | default(ansible_user_id) }}"
+ value: >-
+ MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }}
+ by {{ tower_user_name | default(ansible_user_id) }}
delegate_to: localhost
register: spectrum_model_attrs_status
"""
diff --git a/plugins/modules/spotinst_aws_elastigroup.py b/plugins/modules/spotinst_aws_elastigroup.py
index 959dc6acca..759a094626 100644
--- a/plugins/modules/spotinst_aws_elastigroup.py
+++ b/plugins/modules/spotinst_aws_elastigroup.py
@@ -11,9 +11,9 @@ short_description: Create, update or delete Spotinst AWS Elastigroups
author: Spotinst (@talzur)
description:
- Can create, update, or delete Spotinst AWS Elastigroups Launch configuration is part of the elastigroup configuration,
- so no additional modules are necessary for handling the launch configuration. You will have to have a credentials file
- in this location - C($HOME/.spotinst/credentials). The credentials file must contain a row that looks like this C(token
- = ).
+ so no additional modules are necessary for handling the launch configuration. You must have a credentials file in this
+ location - C($HOME/.spotinst/credentials). The credentials file must contain a row that looks like this C(token = <YOUR TOKEN>).
- Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-).
requirements:
- spotinst_sdk >= 1.0.38
@@ -41,8 +41,8 @@ options:
token:
description:
- A Personal API Access Token issued by Spotinst.
- - 'When not specified, the module will try to obtain it, in that order, from: environment variable E(SPOTINST_TOKEN),
- or from the credentials path.'
+ - When not specified, the module tries to obtain it, in that order, from environment variable E(SPOTINST_TOKEN), or
+ from the credentials path.
type: str
availability_vs_cost:
@@ -82,8 +82,7 @@ options:
ebs_optimized:
description:
- - Enable EBS optimization for supported instances which are not enabled by default.; Note - additional charges will
- be applied.
+ - Enable EBS optimization for supported instances which are not enabled by default. Note - additional charges are applied.
type: bool
ebs_volume_pool:
@@ -106,7 +105,7 @@ options:
fallback_to_od:
description:
- - In case of no spots available, Elastigroup will launch an On-demand instance instead.
+ - In case of no spots available, Elastigroup launches an On-demand instance instead.
type: bool
health_check_grace_period:
@@ -140,15 +139,15 @@ options:
id:
description:
- - The group ID if it already exists and you want to update, or delete it. This will not work unless the uniqueness_by
- field is set to ID. When this is set, and the uniqueness_by field is set, the group will either be updated or deleted,
+ - The group ID if it already exists and you want to update, or delete it. This does not work unless the O(uniqueness_by)
+ field is set to ID. When this is set, and the O(uniqueness_by) field is set, the group is either updated or deleted,
but not created.
type: str
image_id:
description:
- - The image ID used to launch the instance.; In case of conflict between Instance type and image type, an error will
- be returned.
+ - The image ID used to launch the instance. In case of conflict between Instance type and image type, an error is
+ returned.
required: true
type: str
@@ -214,13 +213,13 @@ options:
on_demand_count:
description:
- Required if risk is not set.
- - Number of on demand instances to launch. All other instances will be spot instances.; Either set this parameter or
- the risk parameter.
+ - Number of on demand instances to launch. All other instances are spot instances. Either set this parameter or the
+ O(risk) parameter.
type: int
on_demand_instance_type:
description:
- - On-demand instance type that will be provisioned.
+ - On-demand instance type that is provisioned.
type: str
opsworks:
@@ -278,7 +277,7 @@ options:
security_group_ids:
description:
- One or more security group IDs.
- - In case of update it will override the existing Security Group with the new given array.
+ - In case of update it overrides the existing Security Group with the new given array.
required: true
type: list
elements: str
@@ -302,7 +301,7 @@ options:
spot_instance_types:
description:
- - Spot instance type that will be provisioned.
+ - Spot instance type that is provisioned.
required: true
type: list
elements: str
@@ -388,7 +387,7 @@ options:
- name
description:
- If your group names are not unique, you may use this feature to update or delete a specific group. Whenever this property
- is set, you must set a group_id in order to update or delete a group, otherwise a group will be created.
+ is set, you must set a group_id in order to update or delete a group, otherwise a group is created.
default: name
type: str
@@ -399,7 +398,7 @@ options:
utilize_reserved_instances:
description:
- - In case of any available Reserved Instances, Elastigroup will utilize your reservations before purchasing Spot instances.
+ - In case of any available Reserved Instances, Elastigroup utilizes your reservations before purchasing Spot instances.
type: bool
wait_for_instances:
@@ -697,8 +696,15 @@ instances:
description: List of active elastigroup instances and their details.
returned: success
type: dict
- sample: [{"spotInstanceRequestId": "sir-regs25zp", "instanceId": "i-09640ad8678234c", "instanceType": "m4.large", "product": "Linux/UNIX",
- "availabilityZone": "us-west-2b", "privateIp": "180.0.2.244", "createdAt": "2017-07-17T12:46:18.000Z", "status": "fulfilled"}]
+ sample:
+ - "spotInstanceRequestId": "sir-regs25zp"
+ "instanceId": "i-09640ad8678234c"
+ "instanceType": "m4.large"
+ "product": "Linux/UNIX"
+ "availabilityZone": "us-west-2b"
+ "privateIp": "180.0.2.244"
+ "createdAt": "2017-07-17T12:46:18.000Z"
+ "status": "fulfilled"
group_id:
description: Created / Updated group's ID.
returned: success
diff --git a/plugins/modules/ss_3par_cpg.py b/plugins/modules/ss_3par_cpg.py
index c9c9b4bd90..0869d67d84 100644
--- a/plugins/modules/ss_3par_cpg.py
+++ b/plugins/modules/ss_3par_cpg.py
@@ -38,7 +38,7 @@ options:
type: str
domain:
description:
- - Specifies the name of the domain in which the object will reside.
+ - Specifies the name of the domain in which the object resides.
type: str
growth_increment:
description:
@@ -46,11 +46,12 @@ options:
type: str
growth_limit:
description:
- - Specifies that the autogrow operation is limited to the specified storage amount that sets the growth limit(in MiB, GiB or TiB).
+ - Specifies that the autogrow operation is limited to the specified storage amount that sets the growth limit (in MiB,
+ GiB or TiB).
type: str
growth_warning:
description:
- - Specifies that the threshold(in MiB, GiB or TiB) of used logical disk space when exceeded results in a warning alert.
+ - Specifies that the threshold (in MiB, GiB or TiB) of used logical disk space when exceeded results in a warning alert.
type: str
high_availability:
choices:
diff --git a/plugins/modules/ssh_config.py b/plugins/modules/ssh_config.py
index 07637e8003..6a83095f37 100644
--- a/plugins/modules/ssh_config.py
+++ b/plugins/modules/ssh_config.py
@@ -49,7 +49,7 @@ options:
host:
description:
- The endpoint this configuration is valid for.
- - Can be an actual address on the internet or an alias that will connect to the value of O(hostname).
+ - It can be an actual address on the internet or an alias that connects to the value of O(hostname).
required: true
type: str
hostname:
@@ -66,7 +66,7 @@ options:
type: str
identity_file:
description:
- - The path to an identity file (SSH private key) that will be used when connecting to this host.
+ - The path to an identity file (SSH private key) that is used when connecting to this host.
- File need to exist and have mode V(0600) to be valid.
type: path
identities_only:
@@ -141,7 +141,7 @@ options:
version_added: 10.1.0
other_options:
description:
- - Provides the option to specify arbitrary SSH config entry options via a dictionary.
+ - Allows specifying arbitrary SSH config entry options using a dictionary.
- The key names must be lower case. Keys with upper case values are rejected.
- The values must be strings. Other values are rejected.
type: dict
@@ -162,6 +162,15 @@ EXAMPLES = r"""
other_options:
serveraliveinterval: '30'
+- name: Add SSH config with key auto-added to agent
+ community.general.ssh_config:
+ user: devops
+ host: "example.com"
+ hostname: "staging.example.com"
+ identity_file: "/home/devops/.ssh/id_rsa"
+ add_keys_to_agent: true
+ state: present
+
- name: Delete a host from the configuration
community.general.ssh_config:
ssh_config_file: "{{ ssh_config_test }}"
@@ -189,22 +198,27 @@ hosts_change_diff:
description: A list of host diff changes.
returned: on change
type: list
- sample: [
- {
- "example.com": {
- "new": {
- "hostname": "github.com",
- "identityfile": ["/tmp/test_ssh_config/fake_id_rsa"],
- "port": "2224"
- },
- "old": {
- "hostname": "github.com",
- "identityfile": ["/tmp/test_ssh_config/fake_id_rsa"],
- "port": "2224"
+ sample:
+ [
+ {
+ "example.com": {
+ "new": {
+ "hostname": "github.com",
+ "identityfile": [
+ "/tmp/test_ssh_config/fake_id_rsa"
+ ],
+ "port": "2224"
+ },
+ "old": {
+ "hostname": "github.com",
+ "identityfile": [
+ "/tmp/test_ssh_config/fake_id_rsa"
+ ],
+ "port": "2224"
+ }
}
}
- }
- ]
+ ]
"""
import os
@@ -376,7 +390,7 @@ class SSHConfig(object):
def main():
module = AnsibleModule(
argument_spec=dict(
- group=dict(default=None, type='str'),
+ group=dict(type='str'),
host=dict(type='str', required=True),
hostname=dict(type='str'),
host_key_algorithms=dict(type='str', no_log=False),
@@ -384,24 +398,20 @@ def main():
identities_only=dict(type='bool'),
other_options=dict(type='dict'),
port=dict(type='str'),
- proxycommand=dict(type='str', default=None),
- proxyjump=dict(type='str', default=None),
+ proxycommand=dict(type='str'),
+ proxyjump=dict(type='str'),
forward_agent=dict(type='bool'),
add_keys_to_agent=dict(type='bool'),
remote_user=dict(type='str'),
- ssh_config_file=dict(default=None, type='path'),
+ ssh_config_file=dict(type='path'),
state=dict(type='str', default='present', choices=['present', 'absent']),
- strict_host_key_checking=dict(
- type='str',
- default=None,
- choices=['yes', 'no', 'ask', 'accept-new'],
- ),
- controlmaster=dict(type='str', default=None, choices=['yes', 'no', 'ask', 'auto', 'autoask']),
- controlpath=dict(type='str', default=None),
- controlpersist=dict(type='str', default=None),
+ strict_host_key_checking=dict(type='str', choices=['yes', 'no', 'ask', 'accept-new']),
+ controlmaster=dict(type='str', choices=['yes', 'no', 'ask', 'auto', 'autoask']),
+ controlpath=dict(type='str'),
+ controlpersist=dict(type='str'),
dynamicforward=dict(type='str'),
- user=dict(default=None, type='str'),
- user_known_hosts_file=dict(type='str', default=None),
+ user=dict(type='str'),
+ user_known_hosts_file=dict(type='str'),
),
supports_check_mode=True,
mutually_exclusive=[
diff --git a/plugins/modules/stacki_host.py b/plugins/modules/stacki_host.py
index bfa4cccff5..095e0b7256 100644
--- a/plugins/modules/stacki_host.py
+++ b/plugins/modules/stacki_host.py
@@ -119,25 +119,6 @@ EXAMPLES = r"""
state: absent
"""
-RETURN = r"""
-changed:
- description: Response to whether or not the API call completed successfully.
- returned: always
- type: bool
- sample: true
-
-stdout:
- description: The set of responses from the commands.
- returned: always
- type: list
- sample: ['...', '...']
-
-stdout_lines:
- description: The value of stdout split into a list.
- returned: always
- type: list
- sample: [['...', '...'], ['...'], ['...']]
-"""
import json
diff --git a/plugins/modules/statusio_maintenance.py b/plugins/modules/statusio_maintenance.py
index 9928267bde..c823a286c5 100644
--- a/plugins/modules/statusio_maintenance.py
+++ b/plugins/modules/statusio_maintenance.py
@@ -111,7 +111,7 @@ options:
minutes:
type: int
description:
- - The length of time in UTC that the maintenance will run (starting from playbook runtime).
+ - The duration of the maintenance window (starting from playbook runtime).
default: 10
start_date:
type: str
@@ -344,30 +344,22 @@ def main():
api_id=dict(required=True),
api_key=dict(required=True, no_log=True),
statuspage=dict(required=True),
- state=dict(required=False, default='present',
- choices=['present', 'absent']),
- url=dict(default='https://api.status.io', required=False),
- components=dict(type='list', elements='str', required=False, default=None,
- aliases=['component']),
- containers=dict(type='list', elements='str', required=False, default=None,
- aliases=['container']),
- all_infrastructure_affected=dict(type='bool', default=False,
- required=False),
- automation=dict(type='bool', default=False, required=False),
- title=dict(required=False, default='A new maintenance window'),
- desc=dict(required=False, default='Created by Ansible'),
- minutes=dict(type='int', required=False, default=10),
- maintenance_notify_now=dict(type='bool', default=False,
- required=False),
- maintenance_notify_72_hr=dict(type='bool', default=False,
- required=False),
- maintenance_notify_24_hr=dict(type='bool', default=False,
- required=False),
- maintenance_notify_1_hr=dict(type='bool', default=False,
- required=False),
- maintenance_id=dict(required=False, default=None),
- start_date=dict(default=None, required=False),
- start_time=dict(default=None, required=False)
+ state=dict(default='present', choices=['present', 'absent']),
+ url=dict(default='https://api.status.io'),
+ components=dict(type='list', elements='str', aliases=['component']),
+ containers=dict(type='list', elements='str', aliases=['container']),
+ all_infrastructure_affected=dict(type='bool', default=False),
+ automation=dict(type='bool', default=False),
+ title=dict(default='A new maintenance window'),
+ desc=dict(default='Created by Ansible'),
+ minutes=dict(type='int', default=10),
+ maintenance_notify_now=dict(type='bool', default=False),
+ maintenance_notify_72_hr=dict(type='bool', default=False),
+ maintenance_notify_24_hr=dict(type='bool', default=False),
+ maintenance_notify_1_hr=dict(type='bool', default=False),
+ maintenance_id=dict(),
+ start_date=dict(),
+ start_time=dict()
),
supports_check_mode=True,
)
diff --git a/plugins/modules/sudoers.py b/plugins/modules/sudoers.py
index ac1ff91ff5..f353859a98 100644
--- a/plugins/modules/sudoers.py
+++ b/plugins/modules/sudoers.py
@@ -42,7 +42,7 @@ options:
required: true
description:
- The name of the sudoers rule.
- - This will be used for the filename for the sudoers file managed by this rule.
+ - This is used for the filename for the sudoers file managed by this rule.
type: str
noexec:
description:
@@ -52,7 +52,7 @@ options:
version_added: 8.4.0
nopassword:
description:
- - Whether a password is required when command is run with sudo.
+ - Whether a password is not required when command is run with sudo.
default: true
type: bool
setenv:
@@ -69,12 +69,12 @@ options:
version_added: 6.2.0
runas:
description:
- - Specify the target user the command(s) will run as.
+ - Specify the target user the command(s) runs as.
type: str
version_added: 4.7.0
sudoers_path:
description:
- - The path which sudoers config files will be managed in.
+ - The path which sudoers config files are managed in.
default: /etc/sudoers.d
type: str
state:
@@ -92,9 +92,9 @@ options:
type: str
validation:
description:
- - If V(absent), the sudoers rule will be added without validation.
- - If V(detect) and visudo is available, then the sudoers rule will be validated by visudo.
- - If V(required), visudo must be available to validate the sudoers rule.
+ - If V(absent), the sudoers rule is added without validation.
+ - If V(detect) and C(visudo) is available, then the sudoers rule is validated by C(visudo).
+ - If V(required), C(visudo) must be available to validate the sudoers rule.
type: str
default: detect
choices: [absent, detect, required]
diff --git a/plugins/modules/supervisorctl.py b/plugins/modules/supervisorctl.py
index 7df1674fea..c2ceb1a52b 100644
--- a/plugins/modules/supervisorctl.py
+++ b/plugins/modules/supervisorctl.py
@@ -26,8 +26,8 @@ options:
type: str
description:
- The name of the supervisord program or group to manage.
- - The name will be taken as group name when it ends with a colon V(:).
- - If O(name=all), all programs and program groups will be managed.
+ - The name is taken as group name when it ends with a colon V(:).
+ - If O(name=all), all programs and program groups are managed.
required: true
config:
type: path
@@ -67,12 +67,11 @@ options:
description:
- Path to C(supervisorctl) executable.
notes:
- - When O(state=present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does
- not exist.
- - When O(state=restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
- - When O(state=absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
- If the program/group is still running, the action will fail. If you want to stop the program/group before removing, use
- O(stop_before_removing=true).
+ - When O(state=present), the module calls C(supervisorctl reread) then C(supervisorctl add) if the program/group does not
+ exist.
+ - When O(state=restarted), the module calls C(supervisorctl update) then calls C(supervisorctl restart).
+ - When O(state=absent), the module calls C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
+ If the program/group is still running, the action fails. If you want to stop the program/group before removing, use O(stop_before_removing=true).
requirements: ["supervisorctl"]
author:
- "Matt Wright (@mattupstate)"
diff --git a/plugins/modules/svc.py b/plugins/modules/svc.py
index 42b6bcbeb9..4a6e21ef5f 100644
--- a/plugins/modules/svc.py
+++ b/plugins/modules/svc.py
@@ -30,10 +30,10 @@ options:
required: true
state:
description:
- - V(started)/V(stopped) are idempotent actions that will not run commands unless necessary.
- - V(restarted) will always bounce the svc (svc -t) and V(killed) will always bounce the svc (svc -k).
- - V(reloaded) will send a sigusr1 (svc -1).
- - V(once) will run a normally downed svc once (svc -o), not really an idempotent operation.
+ - V(started)/V(stopped) are idempotent actions that do not run commands unless necessary.
+ - V(restarted) always bounces the svc (svc -t) and V(killed) always bounces the svc (svc -k).
+ - V(reloaded) sends a sigusr1 (svc -1).
+ - V(once) runs a normally downed svc once (svc -o), not really an idempotent operation.
type: str
choices: [killed, once, reloaded, restarted, started, stopped]
downed:
diff --git a/plugins/modules/svr4pkg.py b/plugins/modules/svr4pkg.py
index 34aa599e01..76d65c8f43 100644
--- a/plugins/modules/svr4pkg.py
+++ b/plugins/modules/svr4pkg.py
@@ -16,7 +16,7 @@ short_description: Manage Solaris SVR4 packages
description:
- Manages SVR4 packages on Solaris 10 and 11.
- These were the native packages on Solaris <= 10 and are available as a legacy feature in Solaris 11.
- - Note that this is a very basic packaging system. It will not enforce dependencies on install or remove.
+ - Note that this is a very basic packaging system. It does not enforce dependencies on install or remove.
author: "Boyd Adamson (@brontitall)"
extends_documentation_fragment:
- community.general.attributes
@@ -192,10 +192,10 @@ def main():
argument_spec=dict(
name=dict(required=True),
state=dict(required=True, choices=['present', 'absent']),
- src=dict(default=None),
- proxy=dict(default=None),
- response_file=dict(default=None),
- zone=dict(required=False, default='all', choices=['current', 'all']),
+ src=dict(),
+ proxy=dict(),
+ response_file=dict(),
+ zone=dict(default='all', choices=['current', 'all']),
category=dict(default=False, type='bool')
),
supports_check_mode=True
diff --git a/plugins/modules/swdepot.py b/plugins/modules/swdepot.py
index 628c63f810..69ed726aa0 100644
--- a/plugins/modules/swdepot.py
+++ b/plugins/modules/swdepot.py
@@ -16,7 +16,7 @@ DOCUMENTATION = r"""
module: swdepot
short_description: Manage packages with swdepot package manager (HP-UX)
description:
- - Will install, upgrade and remove packages with swdepot package manager (HP-UX).
+ - Installs, upgrades, and removes packages with C(swdepot) package manager (HP-UX).
notes: []
author: "Raul Melo (@melodous)"
extends_documentation_fragment:
@@ -134,7 +134,7 @@ def main():
argument_spec=dict(
name=dict(aliases=['pkg'], required=True),
state=dict(choices=['present', 'absent', 'latest'], required=True),
- depot=dict(default=None, required=False)
+ depot=dict()
),
supports_check_mode=True
)
diff --git a/plugins/modules/swupd.py b/plugins/modules/swupd.py
index 5b5dbbc48a..c01904821c 100644
--- a/plugins/modules/swupd.py
+++ b/plugins/modules/swupd.py
@@ -96,16 +96,6 @@ EXAMPLES = r"""
manifest: 12920
"""
-RETURN = r"""
-stdout:
- description: C(stdout) of C(swupd).
- returned: always
- type: str
-stderr:
- description: C(stderr) of C(swupd).
- returned: always
- type: str
-"""
import os
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/syslogger.py b/plugins/modules/syslogger.py
index 382f1c8422..7f7dfabd38 100644
--- a/plugins/modules/syslogger.py
+++ b/plugins/modules/syslogger.py
@@ -35,8 +35,25 @@ options:
type: str
description:
- Set the log facility.
- choices: ["kern", "user", "mail", "daemon", "auth", "lpr", "news", "uucp", "cron", "syslog", "local0", "local1", "local2",
- "local3", "local4", "local5", "local6", "local7"]
+ choices:
+ - kern
+ - user
+ - mail
+ - daemon
+ - auth
+ - lpr
+ - news
+ - uucp
+ - cron
+ - syslog
+ - local0
+ - local1
+ - local2
+ - local3
+ - local4
+ - local5
+ - local6
+ - local7
default: "daemon"
log_pid:
description:
@@ -150,17 +167,17 @@ def main():
module_args = dict(
ident=dict(type='str', default='ansible_syslogger'),
msg=dict(type='str', required=True),
- priority=dict(type='str', required=False,
+ priority=dict(type='str',
choices=["emerg", "alert", "crit", "err", "warning",
"notice", "info", "debug"],
default='info'),
- facility=dict(type='str', required=False,
+ facility=dict(type='str',
choices=["kern", "user", "mail", "daemon", "auth",
"lpr", "news", "uucp", "cron", "syslog",
"local0", "local1", "local2", "local3",
"local4", "local5", "local6", "local7"],
default='daemon'),
- log_pid=dict(type='bool', required=False, default=False)
+ log_pid=dict(type='bool', default=False)
)
module = AnsibleModule(
diff --git a/plugins/modules/syspatch.py b/plugins/modules/syspatch.py
index 3cedc220f7..f46671fa74 100644
--- a/plugins/modules/syspatch.py
+++ b/plugins/modules/syspatch.py
@@ -59,20 +59,6 @@ EXAMPLES = r"""
"""
RETURN = r"""
-rc:
- description: The command return code (0 means success).
- returned: always
- type: int
-stdout:
- description: C(syspatch) standard output.
- returned: always
- type: str
- sample: "001_rip6cksum"
-stderr:
- description: C(syspatch) standard error.
- returned: always
- type: str
- sample: "syspatch: need root privileges"
reboot_needed:
description: Whether or not a reboot is required after an update.
returned: always
@@ -103,7 +89,6 @@ def syspatch_run(module):
cmd = module.get_bin_path('syspatch', True)
changed = False
reboot_needed = False
- warnings = []
# Set safe defaults for run_flag and check_flag
run_flag = ['-c']
@@ -145,11 +130,11 @@ def syspatch_run(module):
# Kernel update applied
reboot_needed = True
elif out.lower().find('syspatch updated itself') >= 0:
- warnings.append('Syspatch was updated. Please run syspatch again.')
+ module.warn('Syspatch was updated. Please run syspatch again.')
# If no stdout, then warn user
if len(out) == 0:
- warnings.append('syspatch had suggested changes, but stdout was empty.')
+ module.warn('syspatch had suggested changes, but stdout was empty.')
changed = True
else:
@@ -161,7 +146,6 @@ def syspatch_run(module):
rc=rc,
stderr=err,
stdout=out,
- warnings=warnings
)
diff --git a/plugins/modules/sysrc.py b/plugins/modules/sysrc.py
index 3387483e46..a3e24830a0 100644
--- a/plugins/modules/sysrc.py
+++ b/plugins/modules/sysrc.py
@@ -7,6 +7,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
+
__metaclass__ = type
DOCUMENTATION = r"""
@@ -94,165 +95,122 @@ EXAMPLES = r"""
jail: testjail
"""
-RETURN = r"""
-changed:
- description: Return changed for sysrc actions.
- returned: always
- type: bool
- sample: true
-"""
-from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+
+import os
import re
-class Sysrc(object):
- def __init__(self, module, name, value, path, delim, jail):
- self.module = module
- self.name = name
- self.changed = False
- self.value = value
- self.path = path
- self.delim = delim
- self.jail = jail
- self.sysrc = module.get_bin_path('sysrc', True)
-
- def has_unknown_variable(self, out, err):
- # newer versions of sysrc use stderr instead of stdout
- return err.find("unknown variable") > 0 or out.find("unknown variable") > 0
-
- def exists(self):
- """
- Tests whether the name is in the file. If parameter value is defined,
- then tests whether name=value is in the file. These tests are necessary
- because sysrc doesn't use exit codes. Instead, let sysrc read the
- file's content and create a dictionary comprising the configuration.
- Use this dictionary to preform the tests.
- """
- (rc, out, err) = self.run_sysrc('-e', '-a')
- conf = dict([i.split('=') for i in out.splitlines()])
- if self.value is None:
- return self.name in conf
- else:
- return self.name in conf and conf[self.name] == '"%s"' % self.value
-
- def contains(self):
- (rc, out, err) = self.run_sysrc('-n', self.name)
- if self.has_unknown_variable(out, err):
- return False
-
- return self.value in out.strip().split(self.delim)
-
- def present(self):
- if self.exists():
- return
-
- if not self.module.check_mode:
- (rc, out, err) = self.run_sysrc("%s=%s" % (self.name, self.value))
-
- self.changed = True
-
- def absent(self):
- if not self.exists():
- return
-
- # inversed since we still need to mark as changed
- if not self.module.check_mode:
- (rc, out, err) = self.run_sysrc('-x', self.name)
- if self.has_unknown_variable(out, err):
- return
-
- self.changed = True
-
- def value_present(self):
- if self.contains():
- return
-
- if self.module.check_mode:
- self.changed = True
- return
-
- setstring = '%s+=%s%s' % (self.name, self.delim, self.value)
- (rc, out, err) = self.run_sysrc(setstring)
- if out.find("%s:" % self.name) == 0:
- values = out.split(' -> ')[1].strip().split(self.delim)
- if self.value in values:
- self.changed = True
-
- def value_absent(self):
- if not self.contains():
- return
-
- if self.module.check_mode:
- self.changed = True
- return
-
- setstring = '%s-=%s%s' % (self.name, self.delim, self.value)
- (rc, out, err) = self.run_sysrc(setstring)
- if out.find("%s:" % self.name) == 0:
- values = out.split(' -> ')[1].strip().split(self.delim)
- if self.value not in values:
- self.changed = True
-
- def run_sysrc(self, *args):
- cmd = [self.sysrc, '-f', self.path]
- if self.jail:
- cmd += ['-j', self.jail]
- cmd.extend(args)
-
- (rc, out, err) = self.module.run_command(cmd)
-
- return (rc, out, err)
-
-
-def main():
- module = AnsibleModule(
+class Sysrc(StateModuleHelper):
+ module = dict(
argument_spec=dict(
name=dict(type='str', required=True),
- value=dict(type='str', default=None),
+ value=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'present', 'value_present', 'value_absent']),
path=dict(type='str', default='/etc/rc.conf'),
delim=dict(type='str', default=' '),
- jail=dict(type='str', default=None),
+ jail=dict(type='str')
),
- supports_check_mode=True,
+ supports_check_mode=True
)
+ output_params = ('value',)
+ use_old_vardict = False
- name = module.params.pop('name')
- # OID style names are not supported
- if not re.match('^[a-zA-Z0-9_]+$', name):
- module.fail_json(
- msg="Name may only contain alphanumeric and underscore characters"
- )
+ def __init_module__(self):
+ # OID style names are not supported
+ if not re.match(r'^\w+$', self.vars.name, re.ASCII):
+ self.module.fail_json(msg="Name may only contain alpha-numeric and underscore characters")
- value = module.params.pop('value')
- state = module.params.pop('state')
- path = module.params.pop('path')
- delim = module.params.pop('delim')
- jail = module.params.pop('jail')
- result = dict(
- name=name,
- state=state,
- value=value,
- path=path,
- delim=delim,
- jail=jail
- )
+ self.sysrc = self.module.get_bin_path('sysrc', True)
- rc_value = Sysrc(module, name, value, path, delim, jail)
+ def _contains(self):
+ value = self._get()
+ if value is None:
+ return False, None
- if state == 'present':
- rc_value.present()
- elif state == 'absent':
- rc_value.absent()
- elif state == 'value_present':
- rc_value.value_present()
- elif state == 'value_absent':
- rc_value.value_absent()
+ value = value.split(self.vars.delim)
- result['changed'] = rc_value.changed
+ return self.vars.value in value, value
- module.exit_json(**result)
+ def _get(self):
+ if not os.path.exists(self.vars.path):
+ return None
+
+ (rc, out, err) = self._sysrc('-v', '-n', self.vars.name)
+ if "unknown variable" in err or "unknown variable" in out:
+ # Prior to FreeBSD 11.1 sysrc would write "unknown variable" to stdout and not stderr
+ # https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=229806
+ return None
+
+ if out.startswith(self.vars.path):
+ return out.split(':', 1)[1].strip()
+
+ return None
+
+ def _modify(self, op, changed):
+ (rc, out, err) = self._sysrc("%s%s=%s%s" % (self.vars.name, op, self.vars.delim, self.vars.value))
+ if out.startswith("%s:" % self.vars.name):
+ return changed(out.split(' -> ')[1].strip().split(self.vars.delim))
+
+ return False
+
+ def _sysrc(self, *args):
+ cmd = [self.sysrc, '-f', self.vars.path]
+ if self.vars.jail:
+ cmd += ['-j', self.vars.jail]
+ cmd.extend(args)
+
+ (rc, out, err) = self.module.run_command(cmd)
+ if "Permission denied" in err:
+ self.module.fail_json(msg="Permission denied for %s" % self.vars.path)
+
+ return rc, out, err
+
+ def state_absent(self):
+ if self._get() is None:
+ return
+
+ if not self.check_mode:
+ self._sysrc('-x', self.vars.name)
+
+ self.changed = True
+
+ def state_present(self):
+ value = self._get()
+ if value == self.vars.value:
+ return
+
+ if self.vars.value is None:
+ self.vars.set('value', value)
+ return
+
+ if not self.check_mode:
+ self._sysrc("%s=%s" % (self.vars.name, self.vars.value))
+
+ self.changed = True
+
+ def state_value_absent(self):
+ (contains, _unused) = self._contains()
+ if not contains:
+ return
+
+ self.changed = self.check_mode or self._modify('-', lambda values: self.vars.value not in values)
+
+ def state_value_present(self):
+ (contains, value) = self._contains()
+ if contains:
+ return
+
+ if self.vars.value is None:
+ self.vars.set('value', value)
+ return
+
+ self.changed = self.check_mode or self._modify('+', lambda values: self.vars.value in values)
+
+
+def main():
+ Sysrc.execute()
if __name__ == '__main__':
diff --git a/plugins/modules/systemd_creds_decrypt.py b/plugins/modules/systemd_creds_decrypt.py
index fbe80f2f16..c896737a93 100644
--- a/plugins/modules/systemd_creds_decrypt.py
+++ b/plugins/modules/systemd_creds_decrypt.py
@@ -100,16 +100,12 @@ def main():
"""Decrypt secret using systemd-creds."""
module = AnsibleModule(
argument_spec=dict(
- name=dict(type="str", required=False),
- newline=dict(type="bool", required=False, default=False),
+ name=dict(type="str"),
+ newline=dict(type="bool", default=False),
secret=dict(type="str", required=True, no_log=True),
- timestamp=dict(type="str", required=False),
- transcode=dict(
- type="str",
- choices=["base64", "unbase64", "hex", "unhex"],
- required=False,
- ),
- user=dict(type="str", required=False),
+ timestamp=dict(type="str"),
+ transcode=dict(type="str", choices=["base64", "unbase64", "hex", "unhex"]),
+ user=dict(type="str"),
),
supports_check_mode=True,
)
diff --git a/plugins/modules/systemd_creds_encrypt.py b/plugins/modules/systemd_creds_encrypt.py
index 6f6e635416..2c4912427e 100644
--- a/plugins/modules/systemd_creds_encrypt.py
+++ b/plugins/modules/systemd_creds_encrypt.py
@@ -97,12 +97,12 @@ def main():
"""Encrypt secret using systemd-creds."""
module = AnsibleModule(
argument_spec=dict(
- name=dict(type="str", required=False),
- not_after=dict(type="str", required=False),
+ name=dict(type="str"),
+ not_after=dict(type="str"),
pretty=dict(type="bool", default=False),
secret=dict(type="str", required=True, no_log=True),
- timestamp=dict(type="str", required=False),
- user=dict(type="str", required=False),
+ timestamp=dict(type="str"),
+ user=dict(type="str"),
),
supports_check_mode=True,
)
diff --git a/plugins/modules/systemd_info.py b/plugins/modules/systemd_info.py
index d87df32780..12f308849c 100644
--- a/plugins/modules/systemd_info.py
+++ b/plugins/modules/systemd_info.py
@@ -8,20 +8,19 @@
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-DOCUMENTATION = r'''
----
+DOCUMENTATION = r"""
module: systemd_info
short_description: Gather C(systemd) unit info
description:
- This module gathers info about systemd units (services, targets, sockets, mounts, timers).
- Timer units are supported since community.general 10.5.0.
- - It runs C(systemctl list-units) (or processes selected units) and collects properties
- for each unit using C(systemctl show).
- - In case a unit has multiple properties with the same name, only the value of the first one will be collected.
- - Even if a unit has a RV(units.loadstate) of V(not-found) or V(masked), it is returned,
- but only with the minimal properties (RV(units.name), RV(units.loadstate), RV(units.activestate), RV(units.substate)).
- - When O(unitname) and O(extra_properties) are used, the module first checks if the unit exists,
- then check if properties exist. If not, the module fails.
+ - It runs C(systemctl list-units) (or processes selected units) and collects properties for each unit using C(systemctl
+ show).
+ - In case a unit has multiple properties with the same name, only the value of the first one is collected.
+ - Even if a unit has a RV(units.loadstate) of V(not-found) or V(masked), it is returned, but only with the minimal properties
+ (RV(units.name), RV(units.loadstate), RV(units.activestate), RV(units.substate)).
+ - When O(unitname) and O(extra_properties) are used, the module first checks if the unit exists, then checks if properties
+ exist. If not, the module fails.
- When O(unitname) is used with wildcard expressions, the module checks for units that match the indicated expressions,
if units are not present for all the indicated expressions, the module fails.
version_added: "10.4.0"
@@ -48,9 +47,9 @@ author:
extends_documentation_fragment:
- community.general.attributes
- community.general.attributes.info_module
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
---
# Gather info for all systemd services, targets, sockets, mount and timer
- name: Gather all systemd unit info
@@ -84,13 +83,13 @@ EXAMPLES = r'''
extra_properties:
- AccuracyUSec
register: results
-'''
+"""
-RETURN = r'''
+RETURN = r"""
units:
description:
- Dictionary of systemd unit info keyed by unit name.
- - Additional fields will be returned depending on the value of O(extra_properties).
+ - Additional fields are returned depending on the value of O(extra_properties).
returned: success
type: dict
elements: dict
@@ -117,7 +116,8 @@ units:
substate:
description:
- The detailed sub state of the unit.
- - The most common values are V(running), V(dead), V(exited), V(failed), V(listening), V(active), and V(mounted), but other values are possible as well.
+ - The most common values are V(running), V(dead), V(exited), V(failed), V(listening), V(active), and V(mounted), but
+ other values are possible as well.
returned: always
type: str
sample: running
@@ -170,8 +170,9 @@ units:
returned: only for C(.mount) units.
type: str
sample: /
- sample: {
- "-.mount": {
+ sample:
+ {
+ "-.mount": {
"activestate": "active",
"description": "Root Mount",
"loadstate": "loaded",
@@ -181,8 +182,8 @@ units:
"type": "xfs",
"what": "/dev/mapper/cs-root",
"where": "/"
- },
- "sshd-keygen.target": {
+ },
+ "sshd-keygen.target": {
"activestate": "active",
"description": "sshd-keygen.target",
"fragmentpath": "/usr/lib/systemd/system/sshd-keygen.target",
@@ -191,8 +192,8 @@ units:
"substate": "active",
"unitfilepreset": "disabled",
"unitfilestate": "static"
- },
- "systemd-journald.service": {
+ },
+ "systemd-journald.service": {
"activestate": "active",
"description": "Journal Service",
"execmainpid": "613",
@@ -203,8 +204,8 @@ units:
"substate": "running",
"unitfilepreset": "disabled",
"unitfilestate": "static"
- },
- "systemd-journald.socket": {
+ },
+ "systemd-journald.socket": {
"activestate": "active",
"description": "Journal Socket",
"fragmentpath": "/usr/lib/systemd/system/systemd-journald.socket",
@@ -213,9 +214,9 @@ units:
"substate": "running",
"unitfilepreset": "disabled",
"unitfilestate": "static"
+ }
}
- }
-'''
+"""
import fnmatch
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/sysupgrade.py b/plugins/modules/sysupgrade.py
index cb9eb08226..d247e9d74c 100644
--- a/plugins/modules/sysupgrade.py
+++ b/plugins/modules/sysupgrade.py
@@ -25,7 +25,7 @@ options:
snapshot:
description:
- Apply the latest snapshot.
- - Otherwise release will be applied.
+ - Otherwise the release is applied.
default: false
type: bool
force:
@@ -36,14 +36,13 @@ options:
keep_files:
description:
- Keep the files under C(/home/_sysupgrade).
- - By default, the files will be deleted after the upgrade.
+ - By default, the files are deleted after the upgrade.
default: false
type: bool
fetch_only:
description:
- Fetch and verify files and create C(/bsd.upgrade) but do not reboot.
- - Set to V(false) if you want C(sysupgrade) to reboot. This will cause Ansible to error, as it expects the module to
- exit gracefully. See the examples.
+ - Set to V(false) if you want C(sysupgrade) to reboot. This causes the module to fail. See the examples.
default: true
type: bool
installurl:
@@ -79,21 +78,6 @@ EXAMPLES = r"""
ignore_errors: true
"""
-RETURN = r"""
-rc:
- description: The command return code (0 means success).
- returned: always
- type: int
-stdout:
- description: Sysupgrade standard output.
- returned: always
- type: str
-stderr:
- description: Sysupgrade standard error.
- returned: always
- type: str
- sample: "sysupgrade: need root privileges"
-"""
from ansible.module_utils.basic import AnsibleModule
@@ -102,7 +86,6 @@ def sysupgrade_run(module):
sysupgrade_bin = module.get_bin_path('/usr/sbin/sysupgrade', required=True)
cmd = [sysupgrade_bin]
changed = False
- warnings = []
# Setup command flags
if module.params['snapshot']:
@@ -138,7 +121,6 @@ def sysupgrade_run(module):
rc=rc,
stderr=err,
stdout=out,
- warnings=warnings
)
diff --git a/plugins/modules/taiga_issue.py b/plugins/modules/taiga_issue.py
index 4e6f3aa676..d7f8824c95 100644
--- a/plugins/modules/taiga_issue.py
+++ b/plugins/modules/taiga_issue.py
@@ -119,7 +119,7 @@ EXAMPLES = r"""
state: absent
"""
-RETURN = """# """
+RETURN = """#"""
import traceback
from os import getenv
@@ -255,18 +255,18 @@ def manage_issue(taiga_host, project_name, issue_subject, issue_priority,
def main():
module = AnsibleModule(
argument_spec=dict(
- taiga_host=dict(type='str', required=False, default="https://api.taiga.io"),
+ taiga_host=dict(type='str', default="https://api.taiga.io"),
project=dict(type='str', required=True),
subject=dict(type='str', required=True),
issue_type=dict(type='str', required=True),
- priority=dict(type='str', required=False, default="Normal"),
- status=dict(type='str', required=False, default="New"),
- severity=dict(type='str', required=False, default="Normal"),
- description=dict(type='str', required=False, default=""),
- attachment=dict(type='path', required=False, default=None),
- attachment_description=dict(type='str', required=False, default=""),
- tags=dict(required=False, default=[], type='list', elements='str'),
- state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
+ priority=dict(type='str', default="Normal"),
+ status=dict(type='str', default="New"),
+ severity=dict(type='str', default="Normal"),
+ description=dict(type='str', default=""),
+ attachment=dict(type='path'),
+ attachment_description=dict(type='str', default=""),
+ tags=dict(default=[], type='list', elements='str'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
diff --git a/plugins/modules/telegram.py b/plugins/modules/telegram.py
index cb2e6df2dd..abaa72c83e 100644
--- a/plugins/modules/telegram.py
+++ b/plugins/modules/telegram.py
@@ -21,7 +21,7 @@ description:
- Send notifications using telegram bot, to a verified group or user.
- Also, the user may try to use any other telegram bot API method, if you specify O(api_method) argument.
notes:
- - You will require a telegram account and create telegram bot to use this module.
+ - You need a Telegram account and must create a Telegram bot to use this module.
extends_documentation_fragment:
- community.general.attributes
attributes:
@@ -55,7 +55,7 @@ EXAMPLES = r"""
community.general.telegram:
token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
api_args:
- chat_id: 000000
+ chat_id: "000000"
parse_mode: "markdown"
text: "Your precious application has been deployed: https://example.com"
disable_web_page_preview: true
@@ -66,7 +66,7 @@ EXAMPLES = r"""
token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
api_method: forwardMessage
api_args:
- chat_id: 000000
+ chat_id: "000000"
from_chat_id: 111111
disable_notification: true
message_id: '{{ saved_msg_id }}'
diff --git a/plugins/modules/terraform.py b/plugins/modules/terraform.py
index 106702ea01..a5adbcbe7e 100644
--- a/plugins/modules/terraform.py
+++ b/plugins/modules/terraform.py
@@ -58,19 +58,19 @@ options:
purge_workspace:
description:
- Only works with state = absent.
- - If true, the workspace will be deleted after the "terraform destroy" action.
- - The 'default' workspace will not be deleted.
+ - If V(true), the O(workspace) is deleted after the C(terraform destroy) action.
+ - If O(workspace=default) then it is not deleted.
default: false
type: bool
plan_file:
description:
- - The path to an existing Terraform plan file to apply. If this is not specified, Ansible will build a new TF plan and
- execute it. Note that this option is required if 'state' has the 'planned' value.
+ - The path to an existing Terraform plan file to apply. If this is not specified, Ansible builds a new TF plan and executes
+ it. Note that this option is required if O(state=planned).
type: path
state_file:
description:
- The path to an existing Terraform state file to use when building plan. If this is not specified, the default C(terraform.tfstate)
- will be used.
+ is used.
- This option is ignored when plan is specified.
type: path
variables_files:
@@ -91,7 +91,7 @@ options:
- Ansible dictionaries are mapped to terraform objects.
- Ansible lists are mapped to terraform lists.
- Ansible booleans are mapped to terraform booleans.
- - B(Note) passwords passed as variables will be visible in the log output. Make sure to use C(no_log=true) in production!.
+ - B(Note) passwords passed as variables are visible in the log output. Make sure to use C(no_log=true) in production!
type: dict
complex_vars:
description:
@@ -104,7 +104,7 @@ options:
version_added: 5.7.0
targets:
description:
- - A list of specific resources to target in this plan/application. The resources selected here will also auto-include
+ - A list of specific resources to target in this plan/application. The resources selected here also auto-include
any dependencies.
type: list
elements: str
@@ -120,7 +120,7 @@ options:
type: int
force_init:
description:
- - To avoid duplicating infra, if a state file cannot be found this will force a C(terraform init). Generally, this should
+ - To avoid duplicating infra, if a state file cannot be found this forces a C(terraform init). Generally, this should
be turned off unless you intend to provision an entirely new Terraform deployment.
default: false
type: bool
@@ -165,6 +165,13 @@ options:
- Restrict concurrent operations when Terraform applies the plan.
type: int
version_added: '3.8.0'
+ no_color:
+ description:
+ - If V(true), suppress color codes in output from Terraform commands.
+ - If V(false), allows Terraform to use color codes in its output.
+ type: bool
+ default: true
+ version_added: 11.0.0
notes:
- To just run a C(terraform plan), use check mode.
requirements: ["terraform"]
@@ -177,6 +184,12 @@ EXAMPLES = r"""
project_path: '{{ project_dir }}'
state: present
+- name: Deploy with color output enabled
+ community.general.terraform:
+ project_path: '{{ project_dir }}'
+ state: present
+ no_color: false
+
- name: Define the backend configuration at init
community.general.terraform:
project_path: 'project/'
@@ -259,11 +272,6 @@ outputs:
type: str
returned: always
description: The value of the output as interpolated by Terraform.
-stdout:
- type: str
- description: Full C(terraform) command stdout, in case you want to display it or examine the event log.
- returned: always
- sample: ''
command:
type: str
description: Full C(terraform) command built by this module, in case you want to re-run the command outside the module or
@@ -291,17 +299,20 @@ def get_version(bin_path):
return terraform_version
-def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None):
+def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None, no_color=True):
if project_path is None or '/' not in project_path:
module.fail_json(msg="Path for Terraform project can not be None or ''.")
if not os.path.exists(bin_path):
module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path))
if not os.path.isdir(project_path):
module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))
+ cmd = [bin_path, 'validate']
+ if no_color:
+ cmd.append('-no-color')
if LooseVersion(version) < LooseVersion('0.15.0'):
- module.run_command([bin_path, 'validate', '-no-color'] + variables_args, check_rc=True, cwd=project_path)
+ module.run_command(cmd + variables_args, check_rc=True, cwd=project_path)
else:
- module.run_command([bin_path, 'validate', '-no-color'], check_rc=True, cwd=project_path)
+ module.run_command(cmd, check_rc=True, cwd=project_path)
def _state_args(state_file):
@@ -312,8 +323,10 @@ def _state_args(state_file):
return ['-state', state_file]
-def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace):
- command = [bin_path, 'init', '-input=false', '-no-color']
+def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace, no_color=True):
+ command = [bin_path, 'init', '-input=false']
+ if no_color:
+ command.append('-no-color')
if backend_config:
for key, val in backend_config.items():
command.extend([
@@ -333,9 +346,12 @@ def init_plugins(bin_path, project_path, backend_config, backend_config_files, i
rc, out, err = module.run_command(command, check_rc=True, cwd=project_path, environ_update={"TF_WORKSPACE": workspace})
-def get_workspace_context(bin_path, project_path):
+def get_workspace_context(bin_path, project_path, no_color=True):
workspace_ctx = {"current": "default", "all": []}
- command = [bin_path, 'workspace', 'list', '-no-color']
+ command = [bin_path, 'workspace', 'list']
+ if no_color:
+ command.append('-no-color')
+
rc, out, err = module.run_command(command, cwd=project_path)
if rc != 0:
module.warn("Failed to list Terraform workspaces:\n{0}".format(err))
@@ -351,25 +367,27 @@ def get_workspace_context(bin_path, project_path):
return workspace_ctx
-def _workspace_cmd(bin_path, project_path, action, workspace):
- command = [bin_path, 'workspace', action, workspace, '-no-color']
+def _workspace_cmd(bin_path, project_path, action, workspace, no_color=True):
+ command = [bin_path, 'workspace', action, workspace]
+ if no_color:
+ command.append('-no-color')
rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
return rc, out, err
-def create_workspace(bin_path, project_path, workspace):
- _workspace_cmd(bin_path, project_path, 'new', workspace)
+def create_workspace(bin_path, project_path, workspace, no_color=True):
+ _workspace_cmd(bin_path, project_path, 'new', workspace, no_color)
-def select_workspace(bin_path, project_path, workspace):
- _workspace_cmd(bin_path, project_path, 'select', workspace)
+def select_workspace(bin_path, project_path, workspace, no_color=True):
+ _workspace_cmd(bin_path, project_path, 'select', workspace, no_color)
-def remove_workspace(bin_path, project_path, workspace):
- _workspace_cmd(bin_path, project_path, 'delete', workspace)
+def remove_workspace(bin_path, project_path, workspace, no_color=True):
+ _workspace_cmd(bin_path, project_path, 'delete', workspace, no_color)
-def build_plan(command, project_path, variables_args, state_file, targets, state, args, plan_path=None):
+def build_plan(command, project_path, variables_args, state_file, targets, state, args, plan_path=None, no_color=True):
if plan_path is None:
f, plan_path = tempfile.mkstemp(suffix='.tfplan')
@@ -391,7 +409,10 @@ def build_plan(command, project_path, variables_args, state_file, targets, state
for a in args:
plan_command.append(a)
- plan_command.extend(['-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path])
+ plan_options = ['-input=false', '-detailed-exitcode', '-out', plan_path]
+ if no_color:
+ plan_options.insert(0, '-no-color')
+ plan_command.extend(plan_options)
for t in targets:
plan_command.extend(['-target', t])
@@ -495,6 +516,7 @@ def main():
check_destroy=dict(type='bool', default=False),
parallelism=dict(type='int'),
provider_upgrade=dict(type='bool', default=False),
+ no_color=dict(type='bool', default=True),
),
required_if=[('state', 'planned', ['plan_file'])],
supports_check_mode=True,
@@ -518,6 +540,7 @@ def main():
overwrite_init = module.params.get('overwrite_init')
check_destroy = module.params.get('check_destroy')
provider_upgrade = module.params.get('provider_upgrade')
+ no_color = module.params.get('no_color')
if bin_path is not None:
command = [bin_path]
@@ -527,22 +550,30 @@ def main():
checked_version = get_version(command[0])
if LooseVersion(checked_version) < LooseVersion('0.15.0'):
- DESTROY_ARGS = ('destroy', '-no-color', '-force')
- APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
+ if no_color:
+ DESTROY_ARGS = ('destroy', '-no-color', '-force')
+ APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
+ else:
+ DESTROY_ARGS = ('destroy', '-force')
+ APPLY_ARGS = ('apply', '-input=false', '-auto-approve=true')
else:
- DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve')
- APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve')
+ if no_color:
+ DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve')
+ APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve')
+ else:
+ DESTROY_ARGS = ('destroy', '-auto-approve')
+ APPLY_ARGS = ('apply', '-input=false', '-auto-approve')
if force_init:
if overwrite_init or not os.path.isfile(os.path.join(project_path, ".terraform", "terraform.tfstate")):
- init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace)
+ init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace, no_color)
- workspace_ctx = get_workspace_context(command[0], project_path)
+ workspace_ctx = get_workspace_context(command[0], project_path, no_color)
if workspace_ctx["current"] != workspace:
if workspace not in workspace_ctx["all"]:
- create_workspace(command[0], project_path, workspace)
+ create_workspace(command[0], project_path, workspace, no_color)
else:
- select_workspace(command[0], project_path, workspace)
+ select_workspace(command[0], project_path, workspace, no_color)
if state == 'present':
command.extend(APPLY_ARGS)
@@ -627,7 +658,7 @@ def main():
for f in variables_files:
variables_args.extend(['-var-file', f])
- preflight_validation(command[0], project_path, checked_version, variables_args)
+ preflight_validation(command[0], project_path, checked_version, variables_args, plan_file, no_color)
if module.params.get('lock') is not None:
if module.params.get('lock'):
@@ -654,7 +685,7 @@ def main():
module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
else:
plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
- module.params.get('targets'), state, APPLY_ARGS, plan_file)
+ module.params.get('targets'), state, APPLY_ARGS, plan_file, no_color)
if state == 'present' and check_destroy and '- destroy' in out:
module.fail_json(msg="Aborting command because it would destroy some resources. "
"Consider switching the 'check_destroy' to false to suppress this error")
@@ -665,13 +696,13 @@ def main():
if state == 'absent':
plan_absent_args = ['-destroy']
plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
- module.params.get('targets'), state, plan_absent_args, plan_file)
+ module.params.get('targets'), state, plan_absent_args, plan_file, no_color)
diff_command = [command[0], 'show', '-json', plan_file]
rc, diff_output, err = module.run_command(diff_command, check_rc=False, cwd=project_path)
changed, result_diff = get_diff(diff_output)
if rc != 0:
if workspace_ctx["current"] != workspace:
- select_workspace(command[0], project_path, workspace_ctx["current"])
+ select_workspace(command[0], project_path, workspace_ctx["current"], no_color)
module.fail_json(msg=err.rstrip(), rc=rc, stdout=out,
stdout_lines=out.splitlines(), stderr=err,
stderr_lines=err.splitlines(),
@@ -681,7 +712,7 @@ def main():
rc, out, err = module.run_command(command, check_rc=False, cwd=project_path)
if rc != 0:
if workspace_ctx["current"] != workspace:
- select_workspace(command[0], project_path, workspace_ctx["current"])
+ select_workspace(command[0], project_path, workspace_ctx["current"], no_color)
module.fail_json(msg=err.rstrip(), rc=rc, stdout=out,
stdout_lines=out.splitlines(), stderr=err,
stderr_lines=err.splitlines(),
@@ -690,7 +721,11 @@ def main():
if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out:
changed = True
- outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file)
+ if no_color:
+ outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file)
+ else:
+ outputs_command = [command[0], 'output', '-json'] + _state_args(state_file)
+
rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path)
outputs = {}
if rc == 1:
@@ -705,9 +740,9 @@ def main():
# Restore the Terraform workspace found when running the module
if workspace_ctx["current"] != workspace:
- select_workspace(command[0], project_path, workspace_ctx["current"])
+ select_workspace(command[0], project_path, workspace_ctx["current"], no_color)
if state == 'absent' and workspace != 'default' and purge_workspace is True:
- remove_workspace(command[0], project_path, workspace)
+ remove_workspace(command[0], project_path, workspace, no_color)
result = {
'state': state,
diff --git a/plugins/modules/timezone.py b/plugins/modules/timezone.py
index 37eb2f94a6..6e105c0bad 100644
--- a/plugins/modules/timezone.py
+++ b/plugins/modules/timezone.py
@@ -57,20 +57,6 @@ author:
- Indrajit Raychaudhuri (@indrajitr)
"""
-RETURN = r"""
-diff:
- description: The differences about the given arguments.
- returned: success
- type: complex
- contains:
- before:
- description: The values before change.
- type: dict
- after:
- description: The values after change.
- type: dict
-"""
-
EXAMPLES = r"""
- name: Set timezone to Asia/Tokyo
become: true
diff --git a/plugins/modules/twilio.py b/plugins/modules/twilio.py
index 09169075a1..4d9dd6ac19 100644
--- a/plugins/modules/twilio.py
+++ b/plugins/modules/twilio.py
@@ -150,7 +150,7 @@ def main():
msg=dict(required=True),
from_number=dict(required=True),
to_numbers=dict(required=True, aliases=['to_number'], type='list', elements='str'),
- media_url=dict(default=None, required=False),
+ media_url=dict(),
),
supports_check_mode=True
)
diff --git a/plugins/modules/typetalk.py b/plugins/modules/typetalk.py
index 505c1a4abd..8728bfb21a 100644
--- a/plugins/modules/typetalk.py
+++ b/plugins/modules/typetalk.py
@@ -14,6 +14,10 @@ module: typetalk
short_description: Send a message to typetalk
description:
- Send a message to typetalk using typetalk API.
+deprecated:
+ removed_in: 13.0.0
+ why: The typetalk service will be discontinued on Dec 2025. See U(https://nulab.com/blog/company-news/typetalk-sunsetting/).
+ alternative: There is none.
extends_documentation_fragment:
- community.general.attributes
attributes:
diff --git a/plugins/modules/udm_dns_zone.py b/plugins/modules/udm_dns_zone.py
index 9031ac0c25..7075572e73 100644
--- a/plugins/modules/udm_dns_zone.py
+++ b/plugins/modules/udm_dns_zone.py
@@ -102,7 +102,7 @@ EXAMPLES = r"""
"""
-RETURN = """# """
+RETURN = """#"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.univention_umc import (
diff --git a/plugins/modules/udm_group.py b/plugins/modules/udm_group.py
index 5564ad0e00..b8cb70d4dd 100644
--- a/plugins/modules/udm_group.py
+++ b/plugins/modules/udm_group.py
@@ -85,7 +85,7 @@ EXAMPLES = r"""
"""
-RETURN = """# """
+RETURN = """#"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.univention_umc import (
diff --git a/plugins/modules/udm_share.py b/plugins/modules/udm_share.py
index 4b2d29b2b0..d0554375db 100644
--- a/plugins/modules/udm_share.py
+++ b/plugins/modules/udm_share.py
@@ -340,7 +340,7 @@ EXAMPLES = r"""
"""
-RETURN = """# """
+RETURN = """#"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.univention_umc import (
diff --git a/plugins/modules/udm_user.py b/plugins/modules/udm_user.py
index 08327101f5..46f6e696b2 100644
--- a/plugins/modules/udm_user.py
+++ b/plugins/modules/udm_user.py
@@ -317,7 +317,7 @@ EXAMPLES = r"""
"""
-RETURN = """# """
+RETURN = """#"""
from datetime import date, timedelta
import traceback
diff --git a/plugins/modules/urpmi.py b/plugins/modules/urpmi.py
index 5760a4829f..454921eaf3 100644
--- a/plugins/modules/urpmi.py
+++ b/plugins/modules/urpmi.py
@@ -184,7 +184,7 @@ def install_packages(module, pkgspec, root, force=True, no_recommends=True):
def root_option(root):
- if (root):
+ if root:
return "--root=%s" % (root)
else:
return ""
diff --git a/plugins/modules/utm_aaa_group.py b/plugins/modules/utm_aaa_group.py
index b29f3d50af..d1444b5bd7 100644
--- a/plugins/modules/utm_aaa_group.py
+++ b/plugins/modules/utm_aaa_group.py
@@ -28,7 +28,7 @@ attributes:
options:
name:
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
type: str
required: true
adirectory_groups:
@@ -210,20 +210,20 @@ def main():
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
- adirectory_groups=dict(type='list', elements='str', required=False, default=[]),
- adirectory_groups_sids=dict(type='dict', required=False, default={}),
- backend_match=dict(type='str', required=False, default="none",
+ adirectory_groups=dict(type='list', elements='str', default=[]),
+ adirectory_groups_sids=dict(type='dict', default={}),
+ backend_match=dict(type='str', default="none",
choices=["none", "adirectory", "edirectory", "radius", "tacacs", "ldap"]),
- comment=dict(type='str', required=False, default=""),
- dynamic=dict(type='str', required=False, default="none", choices=["none", "ipsec_dn", "directory_groups"]),
- edirectory_groups=dict(type='list', elements='str', required=False, default=[]),
- ipsec_dn=dict(type='str', required=False, default=""),
- ldap_attribute=dict(type='str', required=False, default=""),
- ldap_attribute_value=dict(type='str', required=False, default=""),
- members=dict(type='list', elements='str', required=False, default=[]),
- network=dict(type='str', required=False, default=""),
- radius_groups=dict(type='list', elements='str', required=False, default=[]),
- tacacs_groups=dict(type='list', elements='str', required=False, default=[]),
+ comment=dict(type='str', default=""),
+ dynamic=dict(type='str', default="none", choices=["none", "ipsec_dn", "directory_groups"]),
+ edirectory_groups=dict(type='list', elements='str', default=[]),
+ ipsec_dn=dict(type='str', default=""),
+ ldap_attribute=dict(type='str', default=""),
+ ldap_attribute_value=dict(type='str', default=""),
+ members=dict(type='list', elements='str', default=[]),
+ network=dict(type='str', default=""),
+ radius_groups=dict(type='list', elements='str', default=[]),
+ tacacs_groups=dict(type='list', elements='str', default=[]),
)
)
try:
diff --git a/plugins/modules/utm_aaa_group_info.py b/plugins/modules/utm_aaa_group_info.py
index 91fe8ce930..ee0d1c1234 100644
--- a/plugins/modules/utm_aaa_group_info.py
+++ b/plugins/modules/utm_aaa_group_info.py
@@ -29,7 +29,7 @@ options:
name:
type: str
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
extends_documentation_fragment:
diff --git a/plugins/modules/utm_ca_host_key_cert.py b/plugins/modules/utm_ca_host_key_cert.py
index b67531c061..1e6fa1c713 100644
--- a/plugins/modules/utm_ca_host_key_cert.py
+++ b/plugins/modules/utm_ca_host_key_cert.py
@@ -29,7 +29,7 @@ attributes:
options:
name:
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
type: str
ca:
@@ -148,9 +148,9 @@ def main():
ca=dict(type='str', required=True),
meta=dict(type='str', required=True),
certificate=dict(type='str', required=True),
- comment=dict(type='str', required=False),
- encrypted=dict(type='bool', required=False, default=False),
- key=dict(type='str', required=False, no_log=True),
+ comment=dict(type='str'),
+ encrypted=dict(type='bool', default=False),
+ key=dict(type='str', no_log=True),
)
)
try:
diff --git a/plugins/modules/utm_ca_host_key_cert_info.py b/plugins/modules/utm_ca_host_key_cert_info.py
index cab6657ab6..a0fcb97146 100644
--- a/plugins/modules/utm_ca_host_key_cert_info.py
+++ b/plugins/modules/utm_ca_host_key_cert_info.py
@@ -28,7 +28,7 @@ options:
name:
type: str
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
extends_documentation_fragment:
diff --git a/plugins/modules/utm_dns_host.py b/plugins/modules/utm_dns_host.py
index 2eb404b38c..e1a63e1f73 100644
--- a/plugins/modules/utm_dns_host.py
+++ b/plugins/modules/utm_dns_host.py
@@ -29,7 +29,7 @@ options:
name:
type: str
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
address:
type: str
@@ -130,7 +130,7 @@ result:
description: Whether the ipv6 address is resolved or not.
type: bool
timeout:
- description: The timeout until a new resolving will be attempted.
+ description: The timeout until a new resolving is attempted.
type: int
"""
@@ -144,14 +144,14 @@ def main():
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
- address=dict(type='str', required=False, default='0.0.0.0'),
- address6=dict(type='str', required=False, default='::'),
- comment=dict(type='str', required=False, default=""),
- hostname=dict(type='str', required=False),
- interface=dict(type='str', required=False, default=""),
- resolved=dict(type='bool', required=False, default=False),
- resolved6=dict(type='bool', required=False, default=False),
- timeout=dict(type='int', required=False, default=0),
+ address=dict(type='str', default='0.0.0.0'),
+ address6=dict(type='str', default='::'),
+ comment=dict(type='str', default=""),
+ hostname=dict(type='str'),
+ interface=dict(type='str', default=""),
+ resolved=dict(type='bool', default=False),
+ resolved6=dict(type='bool', default=False),
+ timeout=dict(type='int', default=0),
)
)
try:
diff --git a/plugins/modules/utm_network_interface_address.py b/plugins/modules/utm_network_interface_address.py
index 1e3d2ee5c3..7212897655 100644
--- a/plugins/modules/utm_network_interface_address.py
+++ b/plugins/modules/utm_network_interface_address.py
@@ -29,7 +29,7 @@ options:
name:
type: str
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
address:
type: str
@@ -123,10 +123,10 @@ def main():
argument_spec=dict(
name=dict(type='str', required=True),
address=dict(type='str', required=True),
- comment=dict(type='str', required=False, default=""),
- address6=dict(type='str', required=False),
- resolved=dict(type='bool', required=False),
- resolved6=dict(type='bool', required=False),
+ comment=dict(type='str', default=""),
+ address6=dict(type='str'),
+ resolved=dict(type='bool'),
+ resolved6=dict(type='bool'),
)
)
try:
diff --git a/plugins/modules/utm_network_interface_address_info.py b/plugins/modules/utm_network_interface_address_info.py
index b9c394c848..a5b3ff7b3b 100644
--- a/plugins/modules/utm_network_interface_address_info.py
+++ b/plugins/modules/utm_network_interface_address_info.py
@@ -27,7 +27,7 @@ options:
name:
type: str
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
extends_documentation_fragment:
diff --git a/plugins/modules/utm_proxy_auth_profile.py b/plugins/modules/utm_proxy_auth_profile.py
index 207c4ba156..96ae3aa869 100644
--- a/plugins/modules/utm_proxy_auth_profile.py
+++ b/plugins/modules/utm_proxy_auth_profile.py
@@ -30,7 +30,7 @@ options:
name:
type: str
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
aaa:
type: list
@@ -316,29 +316,29 @@ def main():
name=dict(type='str', required=True),
aaa=dict(type='list', elements='str', required=True),
basic_prompt=dict(type='str', required=True),
- backend_mode=dict(type='str', required=False, default="None", choices=['Basic', 'None']),
- backend_strip_basic_auth=dict(type='bool', required=False, default=True),
- backend_user_prefix=dict(type='str', required=False, default=""),
- backend_user_suffix=dict(type='str', required=False, default=""),
- comment=dict(type='str', required=False, default=""),
- frontend_cookie=dict(type='str', required=False),
- frontend_cookie_secret=dict(type='str', required=False, no_log=True),
- frontend_form=dict(type='str', required=False),
- frontend_form_template=dict(type='str', required=False, default=""),
- frontend_login=dict(type='str', required=False),
- frontend_logout=dict(type='str', required=False),
- frontend_mode=dict(type='str', required=False, default="Basic", choices=['Basic', 'Form']),
- frontend_realm=dict(type='str', required=False),
- frontend_session_allow_persistency=dict(type='bool', required=False, default=False),
+ backend_mode=dict(type='str', default="None", choices=['Basic', 'None']),
+ backend_strip_basic_auth=dict(type='bool', default=True),
+ backend_user_prefix=dict(type='str', default=""),
+ backend_user_suffix=dict(type='str', default=""),
+ comment=dict(type='str', default=""),
+ frontend_cookie=dict(type='str'),
+ frontend_cookie_secret=dict(type='str', no_log=True),
+ frontend_form=dict(type='str'),
+ frontend_form_template=dict(type='str', default=""),
+ frontend_login=dict(type='str'),
+ frontend_logout=dict(type='str'),
+ frontend_mode=dict(type='str', default="Basic", choices=['Basic', 'Form']),
+ frontend_realm=dict(type='str'),
+ frontend_session_allow_persistency=dict(type='bool', default=False),
frontend_session_lifetime=dict(type='int', required=True),
- frontend_session_lifetime_limited=dict(type='bool', required=False, default=True),
- frontend_session_lifetime_scope=dict(type='str', required=False, default="hours", choices=['days', 'hours', 'minutes']),
+ frontend_session_lifetime_limited=dict(type='bool', default=True),
+ frontend_session_lifetime_scope=dict(type='str', default="hours", choices=['days', 'hours', 'minutes']),
frontend_session_timeout=dict(type='int', required=True),
- frontend_session_timeout_enabled=dict(type='bool', required=False, default=True),
- frontend_session_timeout_scope=dict(type='str', required=False, default="minutes", choices=['days', 'hours', 'minutes']),
- logout_delegation_urls=dict(type='list', elements='str', required=False, default=[]),
- logout_mode=dict(type='str', required=False, default="None", choices=['None', 'Delegation']),
- redirect_to_requested_url=dict(type='bool', required=False, default=False)
+ frontend_session_timeout_enabled=dict(type='bool', default=True),
+ frontend_session_timeout_scope=dict(type='str', default="minutes", choices=['days', 'hours', 'minutes']),
+ logout_delegation_urls=dict(type='list', elements='str', default=[]),
+ logout_mode=dict(type='str', default="None", choices=['None', 'Delegation']),
+ redirect_to_requested_url=dict(type='bool', default=False)
)
)
try:
diff --git a/plugins/modules/utm_proxy_exception.py b/plugins/modules/utm_proxy_exception.py
index 96cb592e59..174156394c 100644
--- a/plugins/modules/utm_proxy_exception.py
+++ b/plugins/modules/utm_proxy_exception.py
@@ -29,7 +29,7 @@ attributes:
options:
name:
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
type: str
op:
@@ -220,20 +220,20 @@ def main():
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
- op=dict(type='str', required=False, default='AND', choices=['AND', 'OR']),
- path=dict(type='list', elements='str', required=False, default=[]),
- skip_custom_threats_filters=dict(type='list', elements='str', required=False, default=[]),
- skip_threats_filter_categories=dict(type='list', elements='str', required=False, default=[]),
- skipav=dict(type='bool', required=False, default=False),
- skipbadclients=dict(type='bool', required=False, default=False),
- skipcookie=dict(type='bool', required=False, default=False),
- skipform=dict(type='bool', required=False, default=False),
- skipform_missingtoken=dict(type='bool', required=False, default=False),
- skiphtmlrewrite=dict(type='bool', required=False, default=False),
- skiptft=dict(type='bool', required=False, default=False),
- skipurl=dict(type='bool', required=False, default=False),
- source=dict(type='list', elements='str', required=False, default=[]),
- status=dict(type='bool', required=False, default=True),
+ op=dict(type='str', default='AND', choices=['AND', 'OR']),
+ path=dict(type='list', elements='str', default=[]),
+ skip_custom_threats_filters=dict(type='list', elements='str', default=[]),
+ skip_threats_filter_categories=dict(type='list', elements='str', default=[]),
+ skipav=dict(type='bool', default=False),
+ skipbadclients=dict(type='bool', default=False),
+ skipcookie=dict(type='bool', default=False),
+ skipform=dict(type='bool', default=False),
+ skipform_missingtoken=dict(type='bool', default=False),
+ skiphtmlrewrite=dict(type='bool', default=False),
+ skiptft=dict(type='bool', default=False),
+ skipurl=dict(type='bool', default=False),
+ source=dict(type='list', elements='str', default=[]),
+ status=dict(type='bool', default=True),
)
)
try:
diff --git a/plugins/modules/utm_proxy_frontend.py b/plugins/modules/utm_proxy_frontend.py
index 09f0cdd4bc..5330311516 100644
--- a/plugins/modules/utm_proxy_frontend.py
+++ b/plugins/modules/utm_proxy_frontend.py
@@ -30,7 +30,7 @@ options:
name:
type: str
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
add_content_type_header:
description:
@@ -76,12 +76,12 @@ options:
default: []
htmlrewrite:
description:
- - Whether to enable html rewrite or not.
+ - Whether to enable HTML rewrite or not.
type: bool
default: false
htmlrewrite_cookies:
description:
- - Whether to enable html rewrite cookie or not.
+ - Whether to enable HTML rewrite cookie or not.
type: bool
default: false
implicitredirect:
@@ -204,7 +204,7 @@ result:
description: List of associated proxy exceptions.
type: list
htmlrewrite:
- description: State of html rewrite.
+ description: State of HTML rewrite.
type: bool
htmlrewrite_cookies:
description: Whether the HTML rewrite cookie is set.
@@ -251,26 +251,26 @@ def main():
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
- add_content_type_header=dict(type='bool', required=False, default=False),
- address=dict(type='str', required=False, default="REF_DefaultInternalAddress"),
- allowed_networks=dict(type='list', elements='str', required=False, default=["REF_NetworkAny"]),
- certificate=dict(type='str', required=False, default=""),
- comment=dict(type='str', required=False, default=""),
- disable_compression=dict(type='bool', required=False, default=False),
- domain=dict(type='list', elements='str', required=False),
- exceptions=dict(type='list', elements='str', required=False, default=[]),
- htmlrewrite=dict(type='bool', required=False, default=False),
- htmlrewrite_cookies=dict(type='bool', required=False, default=False),
- implicitredirect=dict(type='bool', required=False, default=False),
- lbmethod=dict(type='str', required=False, default="bybusyness",
+ add_content_type_header=dict(type='bool', default=False),
+ address=dict(type='str', default="REF_DefaultInternalAddress"),
+ allowed_networks=dict(type='list', elements='str', default=["REF_NetworkAny"]),
+ certificate=dict(type='str', default=""),
+ comment=dict(type='str', default=""),
+ disable_compression=dict(type='bool', default=False),
+ domain=dict(type='list', elements='str'),
+ exceptions=dict(type='list', elements='str', default=[]),
+ htmlrewrite=dict(type='bool', default=False),
+ htmlrewrite_cookies=dict(type='bool', default=False),
+ implicitredirect=dict(type='bool', default=False),
+ lbmethod=dict(type='str', default="bybusyness",
choices=['bybusyness', 'bytraffic', 'byrequests', '']),
- locations=dict(type='list', elements='str', required=False, default=[]),
- port=dict(type='int', required=False, default=80),
- preservehost=dict(type='bool', required=False, default=False),
- profile=dict(type='str', required=False, default=""),
- status=dict(type='bool', required=False, default=True),
- type=dict(type='str', required=False, default="http", choices=['http', 'https']),
- xheaders=dict(type='bool', required=False, default=False),
+ locations=dict(type='list', elements='str', default=[]),
+ port=dict(type='int', default=80),
+ preservehost=dict(type='bool', default=False),
+ profile=dict(type='str', default=""),
+ status=dict(type='bool', default=True),
+ type=dict(type='str', default="http", choices=['http', 'https']),
+ xheaders=dict(type='bool', default=False),
)
)
try:
diff --git a/plugins/modules/utm_proxy_frontend_info.py b/plugins/modules/utm_proxy_frontend_info.py
index 722e2621a2..859ee67de1 100644
--- a/plugins/modules/utm_proxy_frontend_info.py
+++ b/plugins/modules/utm_proxy_frontend_info.py
@@ -29,7 +29,7 @@ options:
name:
type: str
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
extends_documentation_fragment:
@@ -90,7 +90,7 @@ result:
description: List of associated proxy exceptions.
type: list
htmlrewrite:
- description: State of html rewrite.
+ description: State of HTML rewrite.
type: bool
htmlrewrite_cookies:
description: Whether the HTML rewrite cookie is set.
diff --git a/plugins/modules/utm_proxy_location.py b/plugins/modules/utm_proxy_location.py
index 95ee210e97..15b89bb1a2 100644
--- a/plugins/modules/utm_proxy_location.py
+++ b/plugins/modules/utm_proxy_location.py
@@ -30,7 +30,7 @@ options:
name:
type: str
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
access_control:
description:
@@ -198,19 +198,19 @@ def main():
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
- access_control=dict(type='str', required=False, default="0", choices=['0', '1']),
- allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']),
- auth_profile=dict(type='str', required=False, default=""),
- backend=dict(type='list', elements='str', required=False, default=[]),
- be_path=dict(type='str', required=False, default=""),
- comment=dict(type='str', required=False, default=""),
- denied_networks=dict(type='list', elements='str', required=False, default=[]),
- hot_standby=dict(type='bool', required=False, default=False),
- path=dict(type='str', required=False, default="/"),
- status=dict(type='bool', required=False, default=True),
- stickysession_id=dict(type='str', required=False, default='ROUTEID'),
- stickysession_status=dict(type='bool', required=False, default=False),
- websocket_passthrough=dict(type='bool', required=False, default=False),
+ access_control=dict(type='str', default="0", choices=['0', '1']),
+ allowed_networks=dict(type='list', elements='str', default=['REF_NetworkAny']),
+ auth_profile=dict(type='str', default=""),
+ backend=dict(type='list', elements='str', default=[]),
+ be_path=dict(type='str', default=""),
+ comment=dict(type='str', default=""),
+ denied_networks=dict(type='list', elements='str', default=[]),
+ hot_standby=dict(type='bool', default=False),
+ path=dict(type='str', default="/"),
+ status=dict(type='bool', default=True),
+ stickysession_id=dict(type='str', default='ROUTEID'),
+ stickysession_status=dict(type='bool', default=False),
+ websocket_passthrough=dict(type='bool', default=False),
)
)
try:
diff --git a/plugins/modules/utm_proxy_location_info.py b/plugins/modules/utm_proxy_location_info.py
index 0512d4be15..7a8db919c2 100644
--- a/plugins/modules/utm_proxy_location_info.py
+++ b/plugins/modules/utm_proxy_location_info.py
@@ -29,7 +29,7 @@ options:
name:
type: str
description:
- - The name of the object. Will be used to identify the entry.
+ - The name of the object that identifies the entry.
required: true
extends_documentation_fragment:
diff --git a/plugins/modules/vdo.py b/plugins/modules/vdo.py
index 39eef9fb66..dbfa44f5b6 100644
--- a/plugins/modules/vdo.py
+++ b/plugins/modules/vdo.py
@@ -48,8 +48,8 @@ options:
default: present
activated:
description:
- - The C(activate) status for a VDO volume. If this is set to V(false), the VDO volume cannot be started, and it will
- not start on system startup. However, on initial creation, a VDO volume with "activated" set to "off" will be running,
+ - The C(activate) status for a VDO volume. If this is set to V(false), the VDO volume cannot be started, and it does
+ not start on system startup. However, on initial creation, a VDO volume with O(activated=false) is set to be running
until stopped. This is the default behavior of the C(vdo create) command; it provides the user an opportunity to write
a base amount of metadata (filesystem, LVM headers, and so on) to the VDO volume prior to stopping the volume, and
leaving it deactivated until ready to use.
@@ -220,7 +220,7 @@ EXAMPLES = r"""
state: absent
"""
-RETURN = r"""# """
+RETURN = r"""#"""
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
import re
diff --git a/plugins/modules/vertica_configuration.py b/plugins/modules/vertica_configuration.py
index 39ed27dc2d..d97fbf5ed4 100644
--- a/plugins/modules/vertica_configuration.py
+++ b/plugins/modules/vertica_configuration.py
@@ -139,12 +139,12 @@ def main():
module = AnsibleModule(
argument_spec=dict(
parameter=dict(required=True, aliases=['name']),
- value=dict(default=None),
- db=dict(default=None),
+ value=dict(),
+ db=dict(),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
- login_password=dict(default=None, no_log=True),
+ login_password=dict(no_log=True),
), supports_check_mode=True)
if not pyodbc_found:
diff --git a/plugins/modules/vertica_info.py b/plugins/modules/vertica_info.py
index 29f8669c27..340e782f31 100644
--- a/plugins/modules/vertica_info.py
+++ b/plugins/modules/vertica_info.py
@@ -227,9 +227,9 @@ def main():
argument_spec=dict(
cluster=dict(default='localhost'),
port=dict(default='5433'),
- db=dict(default=None),
+ db=dict(),
login_user=dict(default='dbadmin'),
- login_password=dict(default=None, no_log=True),
+ login_password=dict(no_log=True),
), supports_check_mode=True)
if not pyodbc_found:
diff --git a/plugins/modules/vertica_role.py b/plugins/modules/vertica_role.py
index d814aca273..550c612b8d 100644
--- a/plugins/modules/vertica_role.py
+++ b/plugins/modules/vertica_role.py
@@ -180,7 +180,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
role=dict(required=True, aliases=['name']),
- assigned_roles=dict(default=None, aliases=['assigned_role']),
+ assigned_roles=dict(aliases=['assigned_role']),
state=dict(default='present', choices=['absent', 'present']),
db=dict(),
cluster=dict(default='localhost'),
diff --git a/plugins/modules/vmadm.py b/plugins/modules/vmadm.py
index 8808eeea11..fc7504fba5 100644
--- a/plugins/modules/vmadm.py
+++ b/plugins/modules/vmadm.py
@@ -107,7 +107,8 @@ options:
flexible_disk_size:
required: false
description:
- - This sets an upper bound for the amount of space that a bhyve instance may use for its disks and snapshots of those disks (in MiBs).
+ - This sets an upper bound for the amount of space that a bhyve instance may use for its disks and snapshots of those
+ disks (in MiBs).
type: int
version_added: 10.5.0
force:
diff --git a/plugins/modules/xattr.py b/plugins/modules/xattr.py
index fe48ce2eef..cbd9dcc05c 100644
--- a/plugins/modules/xattr.py
+++ b/plugins/modules/xattr.py
@@ -209,7 +209,7 @@ def main():
not (namespace == 'user' and key.startswith('user.'))):
key = '%s.%s' % (namespace, key)
- if (state == 'present' or value is not None):
+ if state == 'present' or value is not None:
current = get_xattr(module, path, key, follow)
if current is None or key not in current or value != current[key]:
if not module.check_mode:
diff --git a/plugins/modules/xbps.py b/plugins/modules/xbps.py
index 3087e4d79d..dc9d131bd7 100644
--- a/plugins/modules/xbps.py
+++ b/plugins/modules/xbps.py
@@ -363,7 +363,7 @@ def main():
module = AnsibleModule(
argument_spec=dict(
- name=dict(default=None, aliases=['pkg', 'package'], type='list', elements='str'),
+ name=dict(aliases=['pkg', 'package'], type='list', elements='str'),
state=dict(default='present', choices=['present', 'installed',
'latest', 'absent',
'removed']),
diff --git a/plugins/modules/xcc_redfish_command.py b/plugins/modules/xcc_redfish_command.py
index 8e5cbf7d3e..9dbbe8016f 100644
--- a/plugins/modules/xcc_redfish_command.py
+++ b/plugins/modules/xcc_redfish_command.py
@@ -273,33 +273,34 @@ redfish_facts:
description: Resource content.
returned: when command == GetResource or command == GetCollectionResource
type: dict
- sample: '{
- "redfish_facts": {
- "data": {
- "@odata.etag": "\"3179bf00d69f25a8b3c\"",
- "@odata.id": "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS",
- "@odata.type": "#LenovoDNS.v1_0_0.LenovoDNS",
- "DDNS": [
- {
- "DDNSEnable": true,
- "DomainName": "",
- "DomainNameSource": "DHCP"
- }
- ],
- "DNSEnable": true,
- "Description": "This resource is used to represent a DNS resource for a Redfish implementation.",
- "IPv4Address1": "10.103.62.178",
- "IPv4Address2": "0.0.0.0",
- "IPv4Address3": "0.0.0.0",
- "IPv6Address1": "::",
- "IPv6Address2": "::",
- "IPv6Address3": "::",
- "Id": "LenovoDNS",
- "PreferredAddresstype": "IPv4"
- },
- "ret": true
+ sample:
+ {
+ "redfish_facts": {
+ "data": {
+ "@odata.etag": "\"3179bf00d69f25a8b3c\"",
+ "@odata.id": "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS",
+ "@odata.type": "#LenovoDNS.v1_0_0.LenovoDNS",
+ "DDNS": [
+ {
+ "DDNSEnable": true,
+ "DomainName": "",
+ "DomainNameSource": "DHCP"
+ }
+ ],
+ "DNSEnable": true,
+ "Description": "This resource is used to represent a DNS resource for a Redfish implementation.",
+ "IPv4Address1": "10.103.62.178",
+ "IPv4Address2": "0.0.0.0",
+ "IPv4Address3": "0.0.0.0",
+ "IPv6Address1": "::",
+ "IPv6Address2": "::",
+ "IPv6Address3": "::",
+ "Id": "LenovoDNS",
+ "PreferredAddresstype": "IPv4"
+ },
+ "ret": true
+ }
}
- }'
"""
from ansible.module_utils.basic import AnsibleModule
diff --git a/plugins/modules/xdg_mime.py b/plugins/modules/xdg_mime.py
new file mode 100644
index 0000000000..cf297187a4
--- /dev/null
+++ b/plugins/modules/xdg_mime.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Marcos Alano
+# Based on gio_mime module. Copyright (c) 2022, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# In memory: This code is dedicated to my late grandmother, Maria Marlene. 1936-2025. Rest in peace, grandma.
+# -Marcos Alano-
+
+# TODO: Add support for diff mode
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: xdg_mime
+author:
+ - "Marcos Alano (@mhalano)"
+short_description: Set default handler for MIME types, for applications using XDG tools
+version_added: 10.7.0
+description:
+ - This module allows configuring the default handler for specific MIME types when you use applications that rely on XDG.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ mime_types:
+ description:
+ - One or more MIME types for which a default handler is set.
+ type: list
+ elements: str
+ required: true
+ handler:
+ description:
+ - Sets the default handler for the specified MIME types.
+ - The desktop file must be installed in the system. If the desktop file is not installed, the module does not fail,
+ but the handler is not set either.
+ - You must pass a handler in the form V(*.desktop), otherwise the module fails.
+ type: str
+ required: true
+notes:
+ - This module is a thin wrapper around C(xdg-mime) tool.
+ - See man xdg-mime(1) for more details.
+seealso:
+ - name: C(xdg-mime) command manual page
+ description: Manual page for the command.
+ link: https://portland.freedesktop.org/doc/xdg-mime.html
+ - name: xdg-utils Documentation
+ description: Reference documentation for xdg-utils.
+ link: https://www.freedesktop.org/wiki/Software/xdg-utils/
+"""
+
+EXAMPLES = r"""
+- name: Set Chrome as the default handler for HTTPS
+ community.general.xdg_mime:
+ mime_types: x-scheme-handler/https
+ handler: google-chrome.desktop
+ register: result
+
+- name: Set Chrome as the default handler for both HTTP and HTTPS
+ community.general.xdg_mime:
+ mime_types:
+ - x-scheme-handler/http
+ - x-scheme-handler/https
+ handler: google-chrome.desktop
+ register: result
+"""
+
+RETURN = r"""
+current_handlers:
+ description:
+ - Currently set handlers for the passed MIME types.
+ returned: success
+ type: list
+ elements: str
+ sample:
+ - google-chrome.desktop
+ - firefox.desktop
+version:
+ description: Version of the C(xdg-mime) tool.
+ type: str
+ returned: always
+ sample: "1.2.1"
+"""
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+from ansible_collections.community.general.plugins.module_utils.xdg_mime import xdg_mime_runner, xdg_mime_get
+
+
+class XdgMime(ModuleHelper):
+ output_params = ['handler']
+
+ module = dict(
+ argument_spec=dict(
+ mime_types=dict(type='list', elements='str', required=True),
+ handler=dict(type='str', required=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ def __init_module__(self):
+ self.runner = xdg_mime_runner(self.module, check_rc=True)
+
+ with self.runner("version") as ctx:
+ rc, out, err = ctx.run()
+ self.vars.version = out.replace("xdg-mime ", "").strip()
+
+ if not self.vars.handler.endswith(".desktop"):
+ self.do_raise(msg="Handler must be a .desktop file")
+
+ self.vars.current_handlers = []
+ for mime in self.vars.mime_types:
+ handler_value = xdg_mime_get(self.runner, mime)
+ if not handler_value:
+ handler_value = ''
+ self.vars.current_handlers.append(handler_value)
+
+ def __run__(self):
+ check_mode_return = (0, 'Module executed in check mode', '')
+
+ if any(h != self.vars.handler for h in self.vars.current_handlers):
+ self.changed = True
+
+ if self.has_changed():
+ with self.runner.context(args_order="default handler mime_types", check_mode_skip=True, check_mode_return=check_mode_return) as ctx:
+ rc, out, err = ctx.run()
+ self.vars.stdout = out
+ self.vars.stderr = err
+ self.vars.set("run_info", ctx.run_info, verbosity=1)
+
+
+def main():
+ XdgMime.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/xenserver_guest.py b/plugins/modules/xenserver_guest.py
index da1f5439d0..44f9192b7e 100644
--- a/plugins/modules/xenserver_guest.py
+++ b/plugins/modules/xenserver_guest.py
@@ -394,141 +394,143 @@ instance:
description: Metadata about the VM.
returned: always
type: dict
- sample: {
- "cdrom": {
- "type": "none"
- },
- "customization_agent": "native",
- "disks": [
- {
- "name": "testvm_11-0",
- "name_desc": "",
- "os_device": "xvda",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "0"
+ sample:
+ {
+ "cdrom": {
+ "type": "none"
},
- {
- "name": "testvm_11-1",
- "name_desc": "",
- "os_device": "xvdb",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "1"
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
}
- ],
- "domid": "56",
- "folder": "",
- "hardware": {
- "memory_mb": 8192,
- "num_cpu_cores_per_socket": 2,
- "num_cpus": 4
- },
- "home_server": "",
- "is_template": false,
- "name": "testvm_11",
- "name_desc": "",
- "networks": [
- {
- "gateway": "192.168.0.254",
- "gateway6": "fc00::fffe",
- "ip": "192.168.0.200",
- "ip6": [
- "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
- "fc00:0000:0000:0000:0000:0000:0000:0001"
- ],
- "mac": "ba:91:3a:48:20:76",
- "mtu": "1500",
- "name": "Pool-wide network associated with eth1",
- "netmask": "255.255.255.128",
- "prefix": "25",
- "prefix6": "64",
- "vif_device": "0"
- }
- ],
- "other_config": {
- "base_template_name": "Windows Server 2016 (64-bit)",
- "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
- "install-methods": "cdrom",
- "instant": "true",
- "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
- },
- "platform": {
- "acpi": "1",
- "apic": "true",
- "cores-per-socket": "2",
- "device_id": "0002",
- "hpet": "true",
- "nx": "true",
- "pae": "true",
- "timeoffset": "-25200",
- "vga": "std",
- "videoram": "8",
- "viridian": "true",
- "viridian_reference_tsc": "true",
- "viridian_time_ref_count": "true"
- },
- "state": "poweredon",
- "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
- "xenstore_data": {
- "vm-data": ""
}
- }
changes:
description: Detected or made changes to VM.
returned: always
type: list
- sample: [
- {
- "hardware": [
- "num_cpus"
- ]
- },
- {
- "disks_changed": [
- [],
- [
- "size"
+ sample:
+ [
+ {
+ "hardware": [
+ "num_cpus"
]
- ]
- },
- {
- "disks_new": [
- {
- "name": "new-disk",
- "name_desc": "",
- "position": 2,
- "size_gb": "4",
- "vbd_userdevice": "2"
- }
- ]
- },
- {
- "cdrom": [
- "type",
- "iso_name"
- ]
- },
- {
- "networks_changed": [
- [
- "mac"
- ],
- ]
- },
- {
- "networks_new": [
- {
- "name": "Pool-wide network associated with eth2",
- "position": 1,
- "vif_device": "1"
- }
- ]
- },
- "need_poweredoff"
- ]
+ },
+ {
+ "disks_changed": [
+ [],
+ [
+ "size"
+ ]
+ ]
+ },
+ {
+ "disks_new": [
+ {
+ "name": "new-disk",
+ "name_desc": "",
+ "position": 2,
+ "size_gb": "4",
+ "vbd_userdevice": "2"
+ }
+ ]
+ },
+ {
+ "cdrom": [
+ "type",
+ "iso_name"
+ ]
+ },
+ {
+ "networks_changed": [
+ [
+ "mac"
+ ]
+ ]
+ },
+ {
+ "networks_new": [
+ {
+ "name": "Pool-wide network associated with eth2",
+ "position": 1,
+ "vif_device": "1"
+ }
+ ]
+ },
+ "need_poweredoff"
+ ]
"""
import re
diff --git a/plugins/modules/xenserver_guest_info.py b/plugins/modules/xenserver_guest_info.py
index 44cc6e054a..d11a69025b 100644
--- a/plugins/modules/xenserver_guest_info.py
+++ b/plugins/modules/xenserver_guest_info.py
@@ -61,90 +61,91 @@ EXAMPLES = r"""
RETURN = r"""
instance:
- description: Metadata about the VM.
- returned: always
- type: dict
- sample: {
- "cdrom": {
- "type": "none"
+ description: Metadata about the VM.
+ returned: always
+ type: dict
+ sample:
+ {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
},
- "customization_agent": "native",
- "disks": [
- {
- "name": "testvm_11-0",
- "name_desc": "",
- "os_device": "xvda",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "0"
- },
- {
- "name": "testvm_11-1",
- "name_desc": "",
- "os_device": "xvdb",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "1"
- }
- ],
- "domid": "56",
- "folder": "",
- "hardware": {
- "memory_mb": 8192,
- "num_cpu_cores_per_socket": 2,
- "num_cpus": 4
- },
- "home_server": "",
- "is_template": false,
- "name": "testvm_11",
- "name_desc": "",
- "networks": [
- {
- "gateway": "192.168.0.254",
- "gateway6": "fc00::fffe",
- "ip": "192.168.0.200",
- "ip6": [
- "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
- "fc00:0000:0000:0000:0000:0000:0000:0001"
- ],
- "mac": "ba:91:3a:48:20:76",
- "mtu": "1500",
- "name": "Pool-wide network associated with eth1",
- "netmask": "255.255.255.128",
- "prefix": "25",
- "prefix6": "64",
- "vif_device": "0"
- }
- ],
- "other_config": {
- "base_template_name": "Windows Server 2016 (64-bit)",
- "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
- "install-methods": "cdrom",
- "instant": "true",
- "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
- },
- "platform": {
- "acpi": "1",
- "apic": "true",
- "cores-per-socket": "2",
- "device_id": "0002",
- "hpet": "true",
- "nx": "true",
- "pae": "true",
- "timeoffset": "-25200",
- "vga": "std",
- "videoram": "8",
- "viridian": "true",
- "viridian_reference_tsc": "true",
- "viridian_time_ref_count": "true"
- },
- "state": "poweredon",
- "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
- "xenstore_data": {
- "vm-data": ""
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
}
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
}
"""
diff --git a/plugins/modules/xenserver_guest_powerstate.py b/plugins/modules/xenserver_guest_powerstate.py
index cbba33920f..c3436300fe 100644
--- a/plugins/modules/xenserver_guest_powerstate.py
+++ b/plugins/modules/xenserver_guest_powerstate.py
@@ -88,90 +88,91 @@ EXAMPLES = r"""
RETURN = r"""
instance:
- description: Metadata about the VM.
- returned: always
- type: dict
- sample: {
- "cdrom": {
- "type": "none"
+ description: Metadata about the VM.
+ returned: always
+ type: dict
+ sample:
+ {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "windows-template-testing-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
},
- "customization_agent": "native",
- "disks": [
- {
- "name": "windows-template-testing-0",
- "name_desc": "",
- "os_device": "xvda",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "0"
- },
- {
- "name": "windows-template-testing-1",
- "name_desc": "",
- "os_device": "xvdb",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "1"
- }
- ],
- "domid": "56",
- "folder": "",
- "hardware": {
- "memory_mb": 8192,
- "num_cpu_cores_per_socket": 2,
- "num_cpus": 4
- },
- "home_server": "",
- "is_template": false,
- "name": "windows-template-testing",
- "name_desc": "",
- "networks": [
- {
- "gateway": "192.168.0.254",
- "gateway6": "fc00::fffe",
- "ip": "192.168.0.200",
- "ip6": [
- "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
- "fc00:0000:0000:0000:0000:0000:0000:0001"
- ],
- "mac": "ba:91:3a:48:20:76",
- "mtu": "1500",
- "name": "Pool-wide network associated with eth1",
- "netmask": "255.255.255.128",
- "prefix": "25",
- "prefix6": "64",
- "vif_device": "0"
- }
- ],
- "other_config": {
- "base_template_name": "Windows Server 2016 (64-bit)",
- "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
- "install-methods": "cdrom",
- "instant": "true",
- "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
- },
- "platform": {
- "acpi": "1",
- "apic": "true",
- "cores-per-socket": "2",
- "device_id": "0002",
- "hpet": "true",
- "nx": "true",
- "pae": "true",
- "timeoffset": "-25200",
- "vga": "std",
- "videoram": "8",
- "viridian": "true",
- "viridian_reference_tsc": "true",
- "viridian_time_ref_count": "true"
- },
- "state": "poweredon",
- "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
- "xenstore_data": {
- "vm-data": ""
+ {
+ "name": "windows-template-testing-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
}
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "windows-template-testing",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
}
"""
diff --git a/plugins/modules/xfconf.py b/plugins/modules/xfconf.py
index 003d1bfdfa..1cae7fb12b 100644
--- a/plugins/modules/xfconf.py
+++ b/plugins/modules/xfconf.py
@@ -178,7 +178,7 @@ class XFConfProperty(StateModuleHelper):
output_params = ('property', 'channel', 'value')
module = dict(
argument_spec=dict(
- state=dict(type='str', choices=("present", "absent"), default="present"),
+ state=dict(type='str', choices=('present', 'absent'), default='present'),
channel=dict(type='str', required=True),
property=dict(type='str', required=True),
value_type=dict(type='list', elements='str',
@@ -190,9 +190,6 @@ class XFConfProperty(StateModuleHelper):
required_together=[('value', 'value_type')],
supports_check_mode=True,
)
- use_old_vardict = False
-
- default_state = 'present'
def __init_module__(self):
self.runner = xfconf_runner(self.module)
@@ -209,8 +206,8 @@ class XFConfProperty(StateModuleHelper):
self.do_raise('xfconf-query failed with error (rc={0}): {1}'.format(rc, err))
result = out.rstrip()
- if "Value is an array with" in result:
- result = result.split("\n")
+ if 'Value is an array with' in result:
+ result = result.split('\n')
result.pop(0)
result.pop(0)
@@ -226,7 +223,7 @@ class XFConfProperty(StateModuleHelper):
self.vars.stdout = ctx.results_out
self.vars.stderr = ctx.results_err
self.vars.cmd = ctx.cmd
- self.vars.set("run_info", ctx.run_info, verbosity=4)
+ self.vars.set('run_info', ctx.run_info, verbosity=4)
self.vars.value = None
def state_present(self):
@@ -256,7 +253,7 @@ class XFConfProperty(StateModuleHelper):
self.vars.stdout = ctx.results_out
self.vars.stderr = ctx.results_err
self.vars.cmd = ctx.cmd
- self.vars.set("run_info", ctx.run_info, verbosity=4)
+ self.vars.set('run_info', ctx.run_info, verbosity=4)
if not self.vars.is_array:
self.vars.value = self.vars.value[0]
diff --git a/plugins/modules/xfconf_info.py b/plugins/modules/xfconf_info.py
index 90457eeaa1..74bebf35cb 100644
--- a/plugins/modules/xfconf_info.py
+++ b/plugins/modules/xfconf_info.py
@@ -142,7 +142,6 @@ class XFConfInfo(ModuleHelper):
),
supports_check_mode=True,
)
- use_old_vardict = False
def __init_module__(self):
self.runner = xfconf_runner(self.module, check_rc=True)
diff --git a/plugins/modules/xml.py b/plugins/modules/xml.py
index 483383b5d3..3a41cd8514 100644
--- a/plugins/modules/xml.py
+++ b/plugins/modules/xml.py
@@ -301,7 +301,9 @@ EXAMPLES = r"""
# Attributes
name: Scumm bar
location: Monkey island
-# Subnodes
+ # Value
+ +value: unreal
+ # Subnodes
_:
- floor: Pirate hall
- floor: Grog storage
@@ -331,12 +333,15 @@ actions:
description: A dictionary with the original xpath, namespaces and state.
type: dict
returned: success
- sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
-backup_file:
- description: The name of the backup file that was created.
- type: str
- returned: when O(backup=true)
- sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
+ sample:
+ {
+ "xpath": "xpath",
+ "namespaces": [
+ "namespace1",
+ "namespace2"
+ ],
+ "state": "present"
+ }
count:
description: The count of xpath matches.
type: int
@@ -346,10 +351,6 @@ matches:
description: The xpath matches found.
type: list
returned: when parameter O(print_match) is set
-msg:
- description: A message related to the performed action(s).
- type: str
- returned: always
xmlstring:
description: An XML string of the resulting output.
type: str
@@ -633,7 +634,7 @@ def check_or_make_target(module, tree, xpath, namespaces):
# module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
elif eoa == "":
for node in tree.xpath(inner_xpath, namespaces=namespaces):
- if (node.text != eoa_value):
+ if node.text != eoa_value:
node.text = eoa_value
changed = True
@@ -756,6 +757,7 @@ def child_to_element(module, child, in_type):
(key, value) = next(iteritems(child))
if isinstance(value, MutableMapping):
children = value.pop('_', None)
+ child_value = value.pop('+value', None)
node = etree.Element(key, value)
@@ -765,6 +767,9 @@ def child_to_element(module, child, in_type):
subnodes = children_to_nodes(module, children)
node.extend(subnodes)
+
+ if child_value is not None:
+ node.text = child_value
else:
node = etree.Element(key)
node.text = value
diff --git a/plugins/modules/yarn.py b/plugins/modules/yarn.py
index 553e789e89..75b624e9d2 100644
--- a/plugins/modules/yarn.py
+++ b/plugins/modules/yarn.py
@@ -283,12 +283,12 @@ class Yarn(object):
def main():
arg_spec = dict(
- name=dict(default=None),
- path=dict(default=None, type='path'),
- version=dict(default=None),
+ name=dict(),
+ path=dict(type='path'),
+ version=dict(),
production=dict(default=False, type='bool'),
- executable=dict(default=None, type='path'),
- registry=dict(default=None),
+ executable=dict(type='path'),
+ registry=dict(),
state=dict(default='present', choices=['present', 'absent', 'latest']),
ignore_scripts=dict(default=False, type='bool'),
)
diff --git a/plugins/modules/yum_versionlock.py b/plugins/modules/yum_versionlock.py
index 9982700928..183ffdc6fe 100644
--- a/plugins/modules/yum_versionlock.py
+++ b/plugins/modules/yum_versionlock.py
@@ -80,7 +80,7 @@ packages:
returned: success
type: list
elements: str
- sample: ['httpd']
+ sample: ["httpd"]
state:
description: State of package(s).
returned: success
diff --git a/plugins/modules/zfs_facts.py b/plugins/modules/zfs_facts.py
index 64c1c8f5ca..1bbe73cb27 100644
--- a/plugins/modules/zfs_facts.py
+++ b/plugins/modules/zfs_facts.py
@@ -93,16 +93,63 @@ zfs_datasets:
description: ZFS dataset facts.
returned: always
type: str
- sample: {"aclinherit": "restricted", "aclmode": "discard", "atime": "on", "available": "43.8G", "canmount": "on", "casesensitivity": "sensitive",
- "checksum": "on", "compression": "off", "compressratio": "1.00x", "copies": "1", "creation": "Thu Jun 16 11:37 2016",
- "dedup": "off", "devices": "on", "exec": "on", "filesystem_count": "none", "filesystem_limit": "none", "logbias": "latency",
- "logicalreferenced": "18.5K", "logicalused": "3.45G", "mlslabel": "none", "mounted": "yes", "mountpoint": "/rpool", "name": "rpool",
- "nbmand": "off", "normalization": "none", "org.openindiana.caiman:install": "ready", "primarycache": "all", "quota": "none",
- "readonly": "off", "recordsize": "128K", "redundant_metadata": "all", "refcompressratio": "1.00x", "referenced": "29.5K",
- "refquota": "none", "refreservation": "none", "reservation": "none", "secondarycache": "all", "setuid": "on", "sharenfs": "off",
- "sharesmb": "off", "snapdir": "hidden", "snapshot_count": "none", "snapshot_limit": "none", "sync": "standard", "type": "filesystem",
- "used": "4.41G", "usedbychildren": "4.41G", "usedbydataset": "29.5K", "usedbyrefreservation": "0", "usedbysnapshots": "0",
- "utf8only": "off", "version": "5", "vscan": "off", "written": "29.5K", "xattr": "on", "zoned": "off"}
+ sample:
+ "aclinherit": "restricted"
+ "aclmode": "discard"
+ "atime": "on"
+ "available": "43.8G"
+ "canmount": "on"
+ "casesensitivity": "sensitive"
+ "checksum": "on"
+ "compression": "off"
+ "compressratio": "1.00x"
+ "copies": "1"
+ "creation": "Thu Jun 16 11:37 2016"
+ "dedup": "off"
+ "devices": "on"
+ "exec": "on"
+ "filesystem_count": "none"
+ "filesystem_limit": "none"
+ "logbias": "latency"
+ "logicalreferenced": "18.5K"
+ "logicalused": "3.45G"
+ "mlslabel": "none"
+ "mounted": "yes"
+ "mountpoint": "/rpool"
+ "name": "rpool"
+ "nbmand": "off"
+ "normalization": "none"
+ "org.openindiana.caiman:install": "ready"
+ "primarycache": "all"
+ "quota": "none"
+ "readonly": "off"
+ "recordsize": "128K"
+ "redundant_metadata": "all"
+ "refcompressratio": "1.00x"
+ "referenced": "29.5K"
+ "refquota": "none"
+ "refreservation": "none"
+ "reservation": "none"
+ "secondarycache": "all"
+ "setuid": "on"
+ "sharenfs": "off"
+ "sharesmb": "off"
+ "snapdir": "hidden"
+ "snapshot_count": "none"
+ "snapshot_limit": "none"
+ "sync": "standard"
+ "type": "filesystem"
+ "used": "4.41G"
+ "usedbychildren": "4.41G"
+ "usedbydataset": "29.5K"
+ "usedbyrefreservation": "0"
+ "usedbysnapshots": "0"
+ "utf8only": "off"
+ "version": "5"
+ "vscan": "off"
+ "written": "29.5K"
+ "xattr": "on"
+ "zoned": "off"
"""
from collections import defaultdict
diff --git a/plugins/modules/zpool.py b/plugins/modules/zpool.py
new file mode 100644
index 0000000000..3cce255415
--- /dev/null
+++ b/plugins/modules/zpool.py
@@ -0,0 +1,614 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2025, Tom Hesse
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+module: zpool
+short_description: Manage ZFS zpools
+version_added: 11.0.0
+description:
+ - Create, destroy, and modify ZFS zpools and their vdev layouts, pool properties, and filesystem properties.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - In check mode, any C(zpool) subcommand that supports the dry-run flag (C(-n)) will be run with C(-n) and its simulated
+ output is included in the module's diff results.
+ diff_mode:
+ support: full
+author:
+ - Tom Hesse (@tomhesse)
+options:
+ name:
+ description:
+ - Name of the zpool to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the pool should exist.
+ choices: [present, absent]
+ default: present
+ type: str
+ disable_new_features:
+ description:
+ - If V(true), disable new ZFS feature flags when creating.
+ type: bool
+ default: false
+ force:
+ description:
+ - If V(true), force operations (for example overwrite existing devices).
+ type: bool
+ default: false
+ pool_properties:
+ description:
+ - Dictionary of ZFS pool properties to set (for example V(autoexpand), V(cachefile)).
+ type: dict
+ default: {}
+ filesystem_properties:
+ description:
+ - Dictionary of ZFS filesystem properties to set on the root dataset (for example V(compression), V(dedup)).
+ type: dict
+ default: {}
+ mountpoint:
+ description:
+ - Filesystem mountpoint for the root dataset.
+ type: str
+ altroot:
+ description:
+ - Alternate root for mounting filesystems.
+ type: str
+ temp_name:
+ description:
+ - Temporary name used during pool creation.
+ type: str
+ vdevs:
+ description:
+ - List of vdev definitions for the pool.
+ type: list
+ elements: dict
+ suboptions:
+ role:
+ description:
+ - Special vdev role (for example V(log), V(cache), V(spare)).
+ type: str
+ choices: [log, cache, spare, dedup, special]
+ type:
+ description:
+ - Vdev topology (for example V(stripe), V(mirror), V(raidz)).
+ type: str
+ choices: [stripe, mirror, raidz, raidz1, raidz2, raidz3]
+ default: stripe
+ disks:
+ description:
+ - List of device paths to include in this vdev.
+ required: true
+ type: list
+ elements: path
+"""
+
+EXAMPLES = r"""
+- name: Create pool "tank" on /dev/sda
+ community.general.zpool:
+ name: tank
+ vdevs:
+ - disks:
+ - /dev/sda
+
+- name: Create mirrored pool "tank"
+ community.general.zpool:
+ name: tank
+ vdevs:
+ - type: mirror
+ disks:
+ - /dev/sda
+ - /dev/sdb
+
+- name: Add a cache device to tank
+ community.general.zpool:
+ name: tank
+ vdevs:
+ - disks:
+ - /dev/sda
+ - role: cache
+ disks:
+ - /dev/nvme0n1
+
+- name: Set pool and filesystem properties
+ community.general.zpool:
+ name: tank
+ pool_properties:
+ ashift: 12
+ filesystem_properties:
+ compression: lz4
+ vdevs:
+ - disks:
+ - /dev/sda
+
+- name: Destroy pool "tank"
+ community.general.zpool:
+ name: tank
+ state: absent
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+class Zpool(object):
+
+ def __init__(self, module, name, disable_new_features, force, pool_properties, filesystem_properties, mountpoint, altroot, temp_name, vdevs):
+ self.module = module
+ self.name = name
+ self.disable_new_features = disable_new_features
+ self.force = force
+ self.pool_properties = pool_properties
+ self.filesystem_properties = filesystem_properties
+ self.mountpoint = mountpoint
+ self.altroot = altroot
+ self.temp_name = temp_name
+ self.vdevs = vdevs
+ self.zpool_cmd = module.get_bin_path('zpool', required=True)
+ self.zfs_cmd = module.get_bin_path('zfs', required=True)
+ self.changed = False
+
+ self.zpool_runner = CmdRunner(
+ module,
+ command=self.zpool_cmd,
+ arg_formats=dict(
+ subcommand=cmd_runner_fmt.as_list(),
+ disable_new_features=cmd_runner_fmt.as_bool('-d'),
+ force=cmd_runner_fmt.as_bool('-f'),
+ dry_run=cmd_runner_fmt.as_bool('-n'),
+ pool_properties=cmd_runner_fmt.as_func(
+ lambda props: sum([['-o', '{}={}'.format(prop, value)] for prop, value in (props or {}).items()], [])
+ ),
+ filesystem_properties=cmd_runner_fmt.as_func(
+ lambda props: sum([['-O', '{}={}'.format(prop, value)] for prop, value in (props or {}).items()], [])
+ ),
+ mountpoint=cmd_runner_fmt.as_opt_val('-m'),
+ altroot=cmd_runner_fmt.as_opt_val('-R'),
+ temp_name=cmd_runner_fmt.as_opt_val('-t'),
+ name=cmd_runner_fmt.as_list(),
+ vdevs=cmd_runner_fmt.as_func(
+ lambda vdevs: sum(
+ [
+ ([vdev['role']] if vdev.get('role') else [])
+ + ([] if vdev.get('type', 'stripe') == 'stripe' else [vdev['type']])
+ + vdev.get('disks', [])
+ for vdev in (vdevs or [])
+ ],
+ [],
+ )
+ ),
+ vdev_name=cmd_runner_fmt.as_list(),
+ scripted=cmd_runner_fmt.as_bool('-H'),
+ parsable=cmd_runner_fmt.as_bool('-p'),
+ columns=cmd_runner_fmt.as_opt_val('-o'),
+ properties=cmd_runner_fmt.as_list(),
+ assignment=cmd_runner_fmt.as_list(),
+ full_paths=cmd_runner_fmt.as_bool('-P'),
+ real_paths=cmd_runner_fmt.as_bool('-L'),
+ )
+ )
+
+ self.zfs_runner = CmdRunner(
+ module,
+ command=self.zfs_cmd,
+ arg_formats=dict(
+ subcommand=cmd_runner_fmt.as_list(),
+ scripted=cmd_runner_fmt.as_bool('-H'),
+ columns=cmd_runner_fmt.as_opt_val('-o'),
+ properties=cmd_runner_fmt.as_list(),
+ assignment=cmd_runner_fmt.as_list(),
+ name=cmd_runner_fmt.as_list()
+ )
+ )
+
+ def exists(self):
+ with self.zpool_runner('subcommand name') as ctx:
+ rc, stdout, stderr = ctx.run(subcommand='list', name=self.name)
+ return rc == 0
+
+ def create(self):
+ with self.zpool_runner(
+ 'subcommand disable_new_features force dry_run pool_properties filesystem_properties mountpoint altroot temp_name name vdevs',
+ check_rc=True
+ ) as ctx:
+ rc, stdout, stderr = ctx.run(subcommand='create', dry_run=self.module.check_mode)
+ self.changed = True
+ if self.module.check_mode:
+ return {'prepared': stdout}
+
+ def destroy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ with self.zpool_runner('subcommand name', check_rc=True) as ctx:
+ rc, stdout, stderr = ctx.run(subcommand='destroy')
+ self.changed = True
+
+ def list_pool_properties(self):
+ with self.zpool_runner('subcommand scripted columns properties name', check_rc=True) as ctx:
+ rc, stdout, stderr = ctx.run(
+ subcommand='get',
+ scripted=True,
+ columns='property,value',
+ properties='all',
+ )
+
+ props = {}
+ for line in stdout.splitlines():
+ prop, value = line.split('\t', 1)
+ props[prop] = value
+ return props
+
+ def set_pool_properties_if_changed(self):
+ current = self.list_pool_properties()
+ before = {}
+ after = {}
+ for prop, value in self.pool_properties.items():
+ if current.get(prop) != str(value):
+ before[prop] = current.get(prop)
+ if not self.module.check_mode:
+ with self.zpool_runner('subcommand assignment name', check_rc=True) as ctx:
+ rc, stdout, stderr = ctx.run(subcommand='set', assignment='{}={}'.format(prop, value))
+ after[prop] = str(value)
+ self.changed = True
+ return {'before': {'pool_properties': before}, 'after': {'pool_properties': after}}
+
+ def list_filesystem_properties(self):
+ with self.zfs_runner('subcommand scripted columns properties name', check_rc=True) as ctx:
+ rc, stdout, stderr = ctx.run(
+ subcommand='get',
+ scripted=True,
+ columns='property,value',
+ properties='all',
+ )
+
+ props = {}
+ for line in stdout.splitlines():
+ prop, value = line.split('\t', 1)
+ props[prop] = value
+ return props
+
+ def set_filesystem_properties_if_changed(self):
+ current = self.list_filesystem_properties()
+ before = {}
+ after = {}
+ for prop, value in self.filesystem_properties.items():
+ if current.get(prop) != str(value):
+ before[prop] = current.get(prop)
+ if not self.module.check_mode:
+ with self.zfs_runner('subcommand assignment name', check_rc=True) as ctx:
+ rc, stdout, stderr = ctx.run(subcommand='set', assignment='{}={}'.format(prop, value))
+ after[prop] = str(value)
+ self.changed = True
+ return {'before': {'filesystem_properties': before}, 'after': {'filesystem_properties': after}}
+
+ def base_device(self, device):
+ if not device.startswith('/dev/'):
+ return device
+
+ # loop devices
+ match = re.match(r'^(/dev/loop\d+)$', device)
+ if match:
+ return match.group(1)
+
+ # nvme drives
+ match = re.match(r'^(.*?)(p\d+)$', device)
+ if match:
+ return match.group(1)
+
+ # sata/scsi drives
+ match = re.match(r'^(/dev/(?:sd|vd)[a-z])\d+$', device)
+ if match:
+ return match.group(1)
+
+ return device
+
+ def get_current_layout(self):
+ with self.zpool_runner('subcommand full_paths real_paths name', check_rc=True) as ctx:
+ rc, stdout, stderr = ctx.run(subcommand='status', full_paths=True, real_paths=True)
+
+ vdevs = []
+ current = None
+ in_config = False
+
+ def flush_current(current):
+ if current:
+ if current.get('role') is None:
+ current.pop('role', None)
+ vdevs.append(current)
+ return None
+
+ for line in stdout.splitlines():
+ if not in_config:
+ if line.strip().startswith('config:'):
+ in_config = True
+ continue
+
+ if not line.strip() or line.strip().startswith('NAME'):
+ continue
+
+ partitions = line.split()
+ device = partitions[0]
+
+ if device == self.name:
+ continue
+
+ if device in ('logs', 'cache', 'spares'):
+ current = flush_current(current)
+ role = 'spare' if device == 'spares' else device.rstrip('s')
+ current = {'role': role, 'type': None, 'disks': []}
+ continue
+
+ match_group = re.match(r'^(mirror|raidz\d?)-\d+$', device)
+ if match_group:
+                role = current.get('role') if current else None
+                if current and current.get('type') is not None:
+                    current = flush_current(current)
+                kind = match_group.group(1)
+                current = {'role': role, 'type': kind, 'disks': []}
+ continue
+
+ if device.startswith('/'):
+ base_device = self.base_device(device)
+ if current:
+ if current.get('type') is None:
+ entry = {
+ 'type': 'stripe',
+ 'disks': [base_device]
+ }
+ if current.get('role'):
+ entry['role'] = current['role']
+ vdevs.append(entry)
+ current = None
+ else:
+ current['disks'].append(base_device)
+ else:
+ vdevs.append({'type': 'stripe', 'disks': [base_device]})
+ continue
+
+ if current and current.get('type') is not None:
+ current = flush_current(current)
+
+ return vdevs
+
+ def normalize_vdevs(self, vdevs):
+ alias = {'raidz': 'raidz1'}
+ normalized = []
+ for vdev in vdevs:
+ normalized_type = alias.get(vdev.get('type', 'stripe'), vdev.get('type', 'stripe'))
+ entry = {
+ 'type': normalized_type,
+ 'disks': sorted(vdev['disks']),
+ }
+ role = vdev.get('role')
+ if role is not None:
+ entry['role'] = role
+ normalized.append(entry)
+ return sorted(normalized, key=lambda x: (x.get('role', ''), x['type'], x['disks']))
+
+ def diff_layout(self):
+ current = self.normalize_vdevs(self.get_current_layout())
+ desired = self.normalize_vdevs(self.vdevs)
+
+ before = {'vdevs': current}
+ after = {'vdevs': desired}
+
+ if current != desired:
+ self.changed = True
+
+ return {'before': before, 'after': after}
+
+ def add_vdevs(self):
+ invalid_properties = [k for k in self.pool_properties if k != 'ashift']
+ if invalid_properties:
+ self.module.warn("zpool add only supports 'ashift', ignoring: {}".format(invalid_properties))
+
+ diff = self.diff_layout()
+ before_vdevs = diff['before']['vdevs']
+ after_vdevs = diff['after']['vdevs']
+
+ to_add = [vdev for vdev in after_vdevs if vdev not in before_vdevs]
+ if not to_add:
+ return {}
+
+ with self.zpool_runner('subcommand force dry_run pool_properties name vdevs', check_rc=True) as ctx:
+ rc, stdout, stderr = ctx.run(
+ subcommand='add',
+ dry_run=self.module.check_mode,
+ pool_properties={'ashift': self.pool_properties['ashift']} if 'ashift' in self.pool_properties else {},
+ vdevs=to_add,
+ )
+
+ self.changed = True
+ if self.module.check_mode:
+ return {'prepared': stdout}
+
+ def list_vdevs_with_names(self):
+ with self.zpool_runner('subcommand full_paths real_paths name', check_rc=True) as ctx:
+ rc, stdout, stderr = ctx.run(subcommand='status', full_paths=True, real_paths=True)
+ in_cfg = False
+ saw_pool = False
+ vdevs = []
+ current = None
+ for line in stdout.splitlines():
+ if not in_cfg:
+ if line.strip().startswith('config:'):
+ in_cfg = True
+ continue
+ if not line.strip() or line.strip().startswith('NAME'):
+ continue
+ partitions = line.strip().split()
+ device = partitions[0]
+ if not saw_pool:
+ if device == self.name:
+ saw_pool = True
+ continue
+                if re.match(r'^(mirror|raidz\d?)-\d+$', device) or device in ('cache', 'logs', 'spares'):
+ if current:
+ vdevs.append(current)
+ vdev_type = ('stripe' if device in ('cache', 'logs', 'spares') else ('mirror' if device.startswith('mirror') else 'raidz'))
+ current = {'name': device, 'type': vdev_type, 'disks': []}
+ continue
+ if device.startswith('/') and current:
+ current['disks'].append(self.base_device(device))
+ continue
+ if device.startswith('/'):
+ base_device = self.base_device(device)
+ vdevs.append({'name': base_device, 'type': 'stripe', 'disks': [base_device]})
+ if current:
+ vdevs.append(current)
+ return vdevs
+
+ def remove_vdevs(self):
+ current = self.list_vdevs_with_names()
+ current_disks = {disk for vdev in current for disk in vdev['disks']}
+ desired_disks = {disk for vdev in self.vdevs for disk in vdev.get('disks', [])}
+ gone = current_disks - desired_disks
+ to_remove = [vdev['name'] for vdev in current if any(disk in gone for disk in vdev['disks'])]
+ if not to_remove:
+ return {}
+ with self.zpool_runner('subcommand dry_run name vdev_name', check_rc=True) as ctx:
+ rc, stdout, stderr = ctx.run(
+ subcommand='remove', dry_run=self.module.check_mode, vdev_name=to_remove)
+ self.changed = True
+ if self.module.check_mode:
+ return {'prepared': stdout}
+ before = [vdev['name'] for vdev in current]
+ after = [name for name in before if name not in to_remove]
+ return {'before': {'vdevs': before}, 'after': {'vdevs': after}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ disable_new_features=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ pool_properties=dict(type='dict', default={}),
+ filesystem_properties=dict(type='dict', default={}),
+ mountpoint=dict(type='str'),
+ altroot=dict(type='str'),
+ temp_name=dict(type='str'),
+ vdevs=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ role=dict(
+ type='str',
+ choices=['log', 'cache', 'spare', 'dedup', 'special'],
+ ),
+ type=dict(
+ type='str',
+ choices=['stripe', 'mirror', 'raidz', 'raidz1', 'raidz2', 'raidz3'],
+ default='stripe',
+ ),
+ disks=dict(
+ type='list',
+ elements='path',
+ required=True,
+ ),
+ ),
+ ),
+ ),
+ supports_check_mode=True,
+ required_if=[('state', 'present', ['vdevs'])]
+ )
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ disable_new_features = module.params.get('disable_new_features')
+ force = module.params.get('force')
+ pool_properties = module.params.get('pool_properties')
+ filesystem_properties = module.params.get('filesystem_properties')
+ mountpoint = module.params.get('mountpoint')
+ altroot = module.params.get('altroot')
+ temp_name = module.params.get('temp_name')
+ vdevs = module.params.get('vdevs')
+
+ for property_key in ('pool_properties', 'filesystem_properties'):
+ for key, value in list(module.params.get(property_key, {}).items()):
+ if isinstance(value, bool):
+ module.params[property_key][key] = 'on' if value else 'off'
+
+ if state != 'absent':
+ for idx, vdev in enumerate(vdevs, start=1):
+ disks = vdev.get('disks')
+ if not isinstance(disks, list) or len(disks) == 0:
+ module.fail_json(msg="vdev #{idx}: at least one disk is required (got: {disks!r})".format(idx=idx, disks=disks))
+
+ result = dict(
+ name=name,
+ state=state,
+ )
+
+ zpool = Zpool(module, name, disable_new_features, force, pool_properties, filesystem_properties, mountpoint, altroot, temp_name, vdevs)
+
+ if state == 'present':
+ if zpool.exists():
+ vdev_layout_diff = zpool.diff_layout()
+
+ add_vdev_diff = zpool.add_vdevs() or {}
+ remove_vdev_diff = zpool.remove_vdevs() or {}
+ pool_properties_diff = zpool.set_pool_properties_if_changed()
+ filesystem_properties_diff = zpool.set_filesystem_properties_if_changed()
+
+ before = {}
+ after = {}
+ for diff in (vdev_layout_diff, pool_properties_diff, filesystem_properties_diff):
+ before.update(diff.get('before', {}))
+ after.update(diff.get('after', {}))
+
+ result['diff'] = {'before': before, 'after': after}
+
+ if module.check_mode:
+ prepared = ''
+ for diff in (add_vdev_diff, remove_vdev_diff):
+ if 'prepared' in diff:
+ prepared += (diff['prepared'] if not prepared else '\n' + diff['prepared'])
+ result['diff']['prepared'] = prepared
+ else:
+ if module.check_mode:
+ result['diff'] = zpool.create()
+ else:
+ before_vdevs = []
+ desired_vdevs = zpool.normalize_vdevs(zpool.vdevs)
+ zpool.create()
+ result['diff'] = {
+ 'before': {'state': 'absent', 'vdevs': before_vdevs},
+ 'after': {'state': state, 'vdevs': desired_vdevs},
+ }
+
+ elif state == 'absent':
+ if zpool.exists():
+ before_vdevs = zpool.get_current_layout()
+ zpool.destroy()
+ result['diff'] = {
+ 'before': {'state': 'present', 'vdevs': before_vdevs},
+ 'after': {'state': state, 'vdevs': []},
+ }
+ else:
+ result['diff'] = {}
+
+ result['diff']['before_header'] = name
+ result['diff']['after_header'] = name
+
+ result['changed'] = zpool.changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/plugins/modules/zpool_facts.py b/plugins/modules/zpool_facts.py
index 28c4644d87..93949de4f3 100644
--- a/plugins/modules/zpool_facts.py
+++ b/plugins/modules/zpool_facts.py
@@ -64,14 +64,46 @@ ansible_facts:
description: ZFS pool facts.
returned: always
type: str
- sample: {"allocated": "3.46G", "altroot": "-", "autoexpand": "off", "autoreplace": "off", "bootfs": "rpool/ROOT/openindiana",
- "cachefile": "-", "capacity": "6%", "comment": "-", "dedupditto": "0", "dedupratio": "1.00x", "delegation": "on",
- "expandsize": "-", "failmode": "wait", "feature@async_destroy": "enabled", "feature@bookmarks": "enabled", "feature@edonr": "enabled",
- "feature@embedded_data": "active", "feature@empty_bpobj": "active", "feature@enabled_txg": "active", "feature@extensible_dataset": "enabled",
- "feature@filesystem_limits": "enabled", "feature@hole_birth": "active", "feature@large_blocks": "enabled", "feature@lz4_compress": "active",
- "feature@multi_vdev_crash_dump": "enabled", "feature@sha512": "enabled", "feature@skein": "enabled", "feature@spacemap_histogram": "active",
- "fragmentation": "3%", "free": "46.3G", "freeing": "0", "guid": "15729052870819522408", "health": "ONLINE", "leaked": "0",
- "listsnapshots": "off", "name": "rpool", "readonly": "off", "size": "49.8G", "version": "-"}
+ sample:
+ "allocated": "3.46G"
+ "altroot": "-"
+ "autoexpand": "off"
+ "autoreplace": "off"
+ "bootfs": "rpool/ROOT/openindiana"
+ "cachefile": "-"
+ "capacity": "6%"
+ "comment": "-"
+ "dedupditto": "0"
+ "dedupratio": "1.00x"
+ "delegation": "on"
+ "expandsize": "-"
+ "failmode": "wait"
+ "feature@async_destroy": "enabled"
+ "feature@bookmarks": "enabled"
+ "feature@edonr": "enabled"
+ "feature@embedded_data": "active"
+ "feature@empty_bpobj": "active"
+ "feature@enabled_txg": "active"
+ "feature@extensible_dataset": "enabled"
+ "feature@filesystem_limits": "enabled"
+ "feature@hole_birth": "active"
+ "feature@large_blocks": "enabled"
+ "feature@lz4_compress": "active"
+ "feature@multi_vdev_crash_dump": "enabled"
+ "feature@sha512": "enabled"
+ "feature@skein": "enabled"
+ "feature@spacemap_histogram": "active"
+ "fragmentation": "3%"
+ "free": "46.3G"
+ "freeing": "0"
+ "guid": "15729052870819522408"
+ "health": "ONLINE"
+ "leaked": "0"
+ "listsnapshots": "off"
+ "name": "rpool"
+ "readonly": "off"
+ "size": "49.8G"
+ "version": "-"
name:
description: ZFS pool name.
returned: always
diff --git a/plugins/modules/zypper.py b/plugins/modules/zypper.py
index 8ed359d32e..a9e3bf5257 100644
--- a/plugins/modules/zypper.py
+++ b/plugins/modules/zypper.py
@@ -605,22 +605,22 @@ def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['pkg'], type='list', elements='str'),
- state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']),
- type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
- extra_args_precommand=dict(required=False, default=None),
- disable_gpg_check=dict(required=False, default=False, type='bool'),
- disable_recommends=dict(required=False, default=True, type='bool'),
- force=dict(required=False, default=False, type='bool'),
- force_resolution=dict(required=False, default=False, type='bool'),
- update_cache=dict(required=False, aliases=['refresh'], default=False, type='bool'),
- oldpackage=dict(required=False, default=False, type='bool'),
- extra_args=dict(required=False, default=None),
- allow_vendor_change=dict(required=False, default=False, type='bool'),
- replacefiles=dict(required=False, default=False, type='bool'),
- clean_deps=dict(required=False, default=False, type='bool'),
- simple_errors=dict(required=False, default=False, type='bool'),
- quiet=dict(required=False, default=True, type='bool'),
- skip_post_errors=dict(required=False, default=False, type='bool'),
+ state=dict(default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']),
+ type=dict(default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
+ extra_args_precommand=dict(),
+ disable_gpg_check=dict(default=False, type='bool'),
+ disable_recommends=dict(default=True, type='bool'),
+ force=dict(default=False, type='bool'),
+ force_resolution=dict(default=False, type='bool'),
+ update_cache=dict(aliases=['refresh'], default=False, type='bool'),
+ oldpackage=dict(default=False, type='bool'),
+ extra_args=dict(),
+ allow_vendor_change=dict(default=False, type='bool'),
+ replacefiles=dict(default=False, type='bool'),
+ clean_deps=dict(default=False, type='bool'),
+ simple_errors=dict(default=False, type='bool'),
+ quiet=dict(default=True, type='bool'),
+ skip_post_errors=dict(default=False, type='bool'),
),
supports_check_mode=True
)
diff --git a/plugins/modules/zypper_repository.py b/plugins/modules/zypper_repository.py
index 37aa36c601..e6beeca9a4 100644
--- a/plugins/modules/zypper_repository.py
+++ b/plugins/modules/zypper_repository.py
@@ -142,6 +142,7 @@ from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves import configparser, StringIO
from io import open
@@ -173,7 +174,10 @@ def _parse_repos(module):
opts = {}
for o in REPO_OPTS:
opts[o] = repo.getAttribute(o)
- opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
+ try:
+ opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
+ except IndexError:
+ opts['url'] = repo.getAttribute('metalink')
# A repo can be uniquely identified by an alias + url
repos.append(opts)
return repos
@@ -246,7 +250,7 @@ def repo_exists(module, repodata, overwrite_multiple):
module.fail_json(msg=errmsg)
-def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
+def addmodify_repo(module, repodata, old_repos, zypper_version):
"Adds the repo, removes old repos before, that would conflict."
repo = repodata['url']
cmd = _get_cmd(module, 'addrepo', '--check')
@@ -259,7 +263,7 @@ def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
if zypper_version >= LooseVersion('1.12.25'):
cmd.extend(['--priority', str(repodata['priority'])])
else:
- warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
+ module.warn("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
if repodata['enabled'] == '0':
cmd.append('--disable')
@@ -273,7 +277,7 @@ def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
else:
cmd.append('--no-gpgcheck')
else:
- warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
+ module.warn("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
if repodata['autorefresh'] == '1':
cmd.append('--refresh')
@@ -322,17 +326,17 @@ def runrefreshrepo(module, auto_import_keys=False, shortname=None):
def main():
module = AnsibleModule(
argument_spec=dict(
- name=dict(required=False),
- repo=dict(required=False),
+ name=dict(),
+ repo=dict(),
state=dict(choices=['present', 'absent'], default='present'),
- runrefresh=dict(required=False, default=False, type='bool'),
- description=dict(required=False),
- disable_gpg_check=dict(required=False, default=False, type='bool'),
- autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']),
- priority=dict(required=False, type='int'),
- enabled=dict(required=False, default=True, type='bool'),
- overwrite_multiple=dict(required=False, default=False, type='bool'),
- auto_import_keys=dict(required=False, default=False, type='bool'),
+ runrefresh=dict(default=False, type='bool'),
+ description=dict(),
+ disable_gpg_check=dict(default=False, type='bool'),
+ autorefresh=dict(default=True, type='bool', aliases=['refresh']),
+ priority=dict(type='int'),
+ enabled=dict(default=True, type='bool'),
+ overwrite_multiple=dict(default=False, type='bool'),
+ auto_import_keys=dict(default=False, type='bool'),
),
supports_check_mode=False,
required_one_of=[['state', 'runrefresh']],
@@ -346,7 +350,6 @@ def main():
runrefresh = module.params['runrefresh']
zypper_version = get_zypper_version(module)
- warnings = [] # collect warning messages for final output
repodata = {
'url': repo,
@@ -407,7 +410,10 @@ def main():
repofile = configparser.ConfigParser()
try:
- repofile.readfp(StringIO(repofile_text))
+ if PY3:
+ repofile.read_file(StringIO(repofile_text))
+ else:
+ repofile.readfp(StringIO(repofile_text))
except configparser.Error:
module.fail_json(msg='Invalid format, .repo file could not be parsed')
@@ -453,7 +459,7 @@ def main():
if runrefresh:
runrefreshrepo(module, auto_import_keys, shortname)
exit_unchanged()
- rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
+ rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version)
if rc == 0 and (runrefresh or auto_import_keys):
runrefreshrepo(module, auto_import_keys, shortname)
elif state == 'absent':
@@ -462,9 +468,9 @@ def main():
rc, stdout, stderr = remove_repo(module, shortname)
if rc == 0:
- module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
+ module.exit_json(changed=True, repodata=repodata, state=state)
else:
- module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
+ module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state)
if __name__ == '__main__':
diff --git a/plugins/test/a_module.py b/plugins/test/a_module.py
index 0d6cecac6a..14f7ae27f2 100644
--- a/plugins/test/a_module.py
+++ b/plugins/test/a_module.py
@@ -6,18 +6,18 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- name: a_module
- short_description: Test whether a given string refers to an existing module or action plugin
- version_added: 4.0.0
- author: Felix Fontein (@felixfontein)
- description:
- - Test whether a given string refers to an existing module or action plugin.
- - This can be useful in roles, which can use this to ensure that required modules are present ahead of time.
- options:
- _input:
- description: A string denoting a fully qualified collection name (FQCN) of a module or action plugin.
- type: string
- required: true
+name: a_module
+short_description: Test whether a given string refers to an existing module or action plugin
+version_added: 4.0.0
+author: Felix Fontein (@felixfontein)
+description:
+ - Test whether a given string refers to an existing module or action plugin.
+ - This can be useful in roles, which can use this to ensure that required modules are present ahead of time.
+options:
+ _input:
+ description: A string denoting a fully qualified collection name (FQCN) of a module or action plugin.
+ type: string
+ required: true
'''
EXAMPLES = '''
@@ -34,9 +34,9 @@ EXAMPLES = '''
'''
RETURN = '''
- _value:
- description: Whether the module or action plugin denoted by the input exists.
- type: boolean
+_value:
+ description: Whether the module or action plugin denoted by the input exists.
+ type: boolean
'''
from ansible.plugins.loader import action_loader, module_loader
diff --git a/plugins/test/ansible_type.py b/plugins/test/ansible_type.py
index f7c004f33f..45bf1b42e5 100644
--- a/plugins/test/ansible_type.py
+++ b/plugins/test/ansible_type.py
@@ -6,31 +6,31 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
- name: ansible_type
- short_description: Validate input type
- version_added: "9.2.0"
- author: Vladimir Botka (@vbotka)
- description: This test validates input type.
- options:
- _input:
- description: Input data.
- type: raw
- required: true
- dtype:
- description: A single data type, or a data types list to be validated.
- type: raw
- required: true
- alias:
- description: Data type aliases.
- default: {}
- type: dictionary
+name: ansible_type
+short_description: Validate input type
+version_added: "9.2.0"
+author: Vladimir Botka (@vbotka)
+description: This test validates input type.
+options:
+ _input:
+ description: Input data.
+ type: raw
+ required: true
+ dtype:
+ description: A single data type, or a data types list to be validated.
+ type: raw
+ required: true
+ alias:
+ description: Data type aliases.
+ default: {}
+ type: dictionary
'''
EXAMPLES = '''
-
# Substitution converts str to AnsibleUnicode or _AnsibleTaggedStr
# ----------------------------------------------------------------
+---
# String. AnsibleUnicode or _AnsibleTaggedStr.
dtype:
- AnsibleUnicode
@@ -39,6 +39,7 @@ data: "abc"
result: '{{ data is community.general.ansible_type(dtype) }}'
# result => true
+---
# String. AnsibleUnicode/_AnsibleTaggedStr alias str.
alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str"}
dtype: str
@@ -46,6 +47,7 @@ data: "abc"
result: '{{ data is community.general.ansible_type(dtype, alias) }}'
# result => true
+---
# List. All items are AnsibleUnicode/_AnsibleTaggedStr.
dtype:
- list[AnsibleUnicode]
@@ -54,6 +56,7 @@ data: ["a", "b", "c"]
result: '{{ data is community.general.ansible_type(dtype) }}'
# result => true
+---
# Dictionary. All keys and values are AnsibleUnicode/_AnsibleTaggedStr.
dtype:
- dict[AnsibleUnicode, AnsibleUnicode]
@@ -65,41 +68,49 @@ result: '{{ data is community.general.ansible_type(dtype) }}'
# No substitution and no alias. Type of strings is str
# ----------------------------------------------------
+---
# String
dtype: str
result: '{{ "abc" is community.general.ansible_type(dtype) }}'
# result => true
+---
# Integer
dtype: int
result: '{{ 123 is community.general.ansible_type(dtype) }}'
# result => true
+---
# Float
dtype: float
result: '{{ 123.45 is community.general.ansible_type(dtype) }}'
# result => true
+---
# Boolean
dtype: bool
result: '{{ true is community.general.ansible_type(dtype) }}'
# result => true
+---
# List. All items are strings.
dtype: list[str]
result: '{{ ["a", "b", "c"] is community.general.ansible_type(dtype) }}'
# result => true
+---
# List of dictionaries.
dtype: list[dict]
result: '{{ [{"a": 1}, {"b": 2}] is community.general.ansible_type(dtype) }}'
# result => true
+---
# Dictionary. All keys are strings. All values are integers.
dtype: dict[str, int]
result: '{{ {"a": 1} is community.general.ansible_type(dtype) }}'
# result => true
+---
# Dictionary. All keys are strings. All values are integers.
dtype: dict[str, int]
result: '{{ {"a": 1, "b": 2} is community.general.ansible_type(dtype) }}'
@@ -108,6 +119,7 @@ result: '{{ {"a": 1, "b": 2} is community.general.ansible_type(dtype) }}'
# Type of strings is AnsibleUnicode, _AnsibleTaggedStr, or str
# ------------------------------------------------------------
+---
# Dictionary. The keys are integers or strings. All values are strings.
alias:
AnsibleUnicode: str
@@ -118,6 +130,7 @@ data: {1: 'a', 'b': 'b'}
result: '{{ data is community.general.ansible_type(dtype, alias) }}'
# result => true
+---
# Dictionary. All keys are integers. All values are keys.
alias:
AnsibleUnicode: str
@@ -128,6 +141,7 @@ data: {1: 'a', 2: 'b'}
result: '{{ data is community.general.ansible_type(dtype, alias) }}'
# result => true
+---
# Dictionary. All keys are strings. Multiple types values.
alias:
AnsibleUnicode: str
@@ -135,10 +149,11 @@ alias:
_AnsibleTaggedInt: int
_AnsibleTaggedFloat: float
dtype: dict[str, bool|dict|float|int|list|str]
-data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}}
+data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': true, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}}
result: '{{ data is community.general.ansible_type(dtype, alias) }}'
# result => true
+---
# List. Multiple types items.
alias:
AnsibleUnicode: str
@@ -146,25 +161,28 @@ alias:
_AnsibleTaggedInt: int
_AnsibleTaggedFloat: float
dtype: list[bool|dict|float|int|list|str]
-data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}]
+data: [1, 2, 1.1, 'abc', true, ['x', 'y', 'z'], {'x': 1, 'y': 2}]
result: '{{ data is community.general.ansible_type(dtype, alias) }}'
# result => true
# Option dtype is list
# --------------------
+---
# AnsibleUnicode, _AnsibleTaggedStr, or str
dtype: ['AnsibleUnicode', '_AnsibleTaggedStr', 'str']
data: abc
result: '{{ data is community.general.ansible_type(dtype) }}'
# result => true
+---
# float or int
dtype: ['float', 'int', "_AnsibleTaggedInt", "_AnsibleTaggedFloat"]
data: 123
result: '{{ data is community.general.ansible_type(dtype) }}'
# result => true
+---
# float or int
dtype: ['float', 'int', "_AnsibleTaggedInt", "_AnsibleTaggedFloat"]
data: 123.45
@@ -174,23 +192,25 @@ result: '{{ data is community.general.ansible_type(dtype) }}'
# Multiple alias
# --------------
+---
# int alias number
alias:
- int: number
- float: number
- _AnsibleTaggedInt: number
- _AnsibleTaggedFloat: float
+ int: number
+ float: number
+ _AnsibleTaggedInt: number
+ _AnsibleTaggedFloat: float
dtype: number
data: 123
result: '{{ data is community.general.ansible_type(dtype, alias) }}'
# result => true
+---
# float alias number
alias:
- int: number
- float: number
- _AnsibleTaggedInt: number
- _AnsibleTaggedFloat: float
+ int: number
+ float: number
+ _AnsibleTaggedInt: number
+ _AnsibleTaggedFloat: float
dtype: number
data: 123.45
result: '{{ data is community.general.ansible_type(dtype, alias) }}'
@@ -198,9 +218,9 @@ result: '{{ data is community.general.ansible_type(dtype, alias) }}'
'''
RETURN = '''
- _value:
- description: Whether the data type is valid.
- type: bool
+_value:
+ description: Whether the data type is valid.
+ type: bool
'''
from ansible.errors import AnsibleFilterError
diff --git a/plugins/test/fqdn_valid.py b/plugins/test/fqdn_valid.py
index 1ec7742077..c8a143687a 100644
--- a/plugins/test/fqdn_valid.py
+++ b/plugins/test/fqdn_valid.py
@@ -17,41 +17,41 @@ else:
DOCUMENTATION = '''
- name: fqdn_valid
- short_description: Validates fully-qualified domain names against RFC 1123
- version_added: 8.1.0
- author: Vladimir Botka (@vbotka)
- requirements:
+name: fqdn_valid
+short_description: Validates fully-qualified domain names against RFC 1123
+version_added: 8.1.0
+author: Vladimir Botka (@vbotka)
+requirements:
- fqdn>=1.5.1 (PyPI)
- description:
- - This test validates Fully Qualified Domain Names (FQDNs)
- conforming to the Internet Engineering Task Force specification
- RFC 1123 and RFC 952.
- - The design intent is to validate that a string would be
- traditionally acceptable as a public Internet hostname to
- RFC-conforming software, which is a strict subset of the logic
- in modern web browsers like Mozilla Firefox and Chromium that
- determines whether make a DNS lookup.
- - Certificate Authorities like Let's Encrypt run a narrower set of
- string validation logic to determine validity for issuance. This
- test is not intended to achieve functional parity with CA
- issuance.
- - Single label names are allowed by default (O(min_labels=1)).
- options:
- _input:
- description: Name of the host.
- type: str
- required: true
- min_labels:
- description: Required minimum of labels, separated by period.
- default: 1
- type: int
- required: false
- allow_underscores:
- description: Allow underscore characters.
- default: false
- type: bool
- required: false
+description:
+ - This test validates Fully Qualified Domain Names (FQDNs)
+ conforming to the Internet Engineering Task Force specification
+ RFC 1123 and RFC 952.
+ - The design intent is to validate that a string would be
+ traditionally acceptable as a public Internet hostname to
+ RFC-conforming software, which is a strict subset of the logic
+ in modern web browsers like Mozilla Firefox and Chromium that
+ determines whether make a DNS lookup.
+ - Certificate Authorities like Let's Encrypt run a narrower set of
+ string validation logic to determine validity for issuance. This
+ test is not intended to achieve functional parity with CA
+ issuance.
+ - Single label names are allowed by default (O(min_labels=1)).
+options:
+ _input:
+ description: Name of the host.
+ type: str
+ required: true
+ min_labels:
+ description: Required minimum of labels, separated by period.
+ default: 1
+ type: int
+ required: false
+ allow_underscores:
+ description: Allow underscore characters.
+ default: false
+ type: bool
+ required: false
'''
EXAMPLES = '''
@@ -69,9 +69,9 @@ EXAMPLES = '''
'''
RETURN = '''
- _value:
- description: Whether the name is valid.
- type: bool
+_value:
+ description: Whether the name is valid.
+ type: bool
'''
diff --git a/tests/integration/requirements.yml b/tests/integration/requirements.yml
index b772fc82d1..dfe544e3cc 100644
--- a/tests/integration/requirements.yml
+++ b/tests/integration/requirements.yml
@@ -4,6 +4,6 @@
# SPDX-License-Identifier: GPL-3.0-or-later
collections:
-- ansible.posix
-- community.crypto
-- community.docker
+ - ansible.posix
+ - community.crypto
+ - community.docker
diff --git a/tests/integration/targets/aix_filesystem/tasks/main.yml b/tests/integration/targets/aix_filesystem/tasks/main.yml
index 878088f4e7..5c4f2c7e39 100644
--- a/tests/integration/targets/aix_filesystem/tasks/main.yml
+++ b/tests/integration/targets/aix_filesystem/tasks/main.yml
@@ -28,10 +28,10 @@
# It requires a host (nfshost) exporting the NFS
- name: Creating NFS filesystem from nfshost (Linux NFS server)
aix_filesystem:
- device: /home/ftp
- nfs_server: nfshost
- filesystem: /nfs/ftp
- state: present
+ device: /home/ftp
+ nfs_server: nfshost
+ filesystem: /nfs/ftp
+ state: present
# It requires a volume group named datavg (next three actions)
- name: Creating a logical volume testlv (aix_lvol module)
diff --git a/tests/integration/targets/alternatives/tasks/main.yml b/tests/integration/targets/alternatives/tasks/main.yml
index cd86b085d4..906463903f 100644
--- a/tests/integration/targets/alternatives/tasks/main.yml
+++ b/tests/integration/targets/alternatives/tasks/main.yml
@@ -9,60 +9,60 @@
- name: 'setup: create a dummy alternative'
block:
- - import_tasks: setup.yml
+ - import_tasks: setup.yml
- ##############
- # Test parameters:
- # link parameter present / absent ('with_link' variable)
- # with / without alternatives defined in alternatives file ('with_alternatives' variable)
- # auto / manual ('mode' variable)
+ ##############
+ # Test parameters:
+ # link parameter present / absent ('with_link' variable)
+ # with / without alternatives defined in alternatives file ('with_alternatives' variable)
+ # auto / manual ('mode' variable)
- - include_tasks: tests.yml
- with_nested:
- - [ true, false ] # with_link
- - [ true, false ] # with_alternatives
- - [ 'auto', 'manual' ] # mode
- loop_control:
- loop_var: test_conf
+ - include_tasks: tests.yml
+ with_nested:
+ - [true, false] # with_link
+ - [true, false] # with_alternatives
+ - ['auto', 'manual'] # mode
+ loop_control:
+ loop_var: test_conf
- ##########
- # Priority
- - block:
- - include_tasks: remove_links.yml
- - include_tasks: setup_test.yml
- # at least two iterations again
- - include_tasks: tests_set_priority.yml
- with_sequence: start=3 end=4
- vars:
- with_alternatives: true
- mode: auto
+ ##########
+ # Priority
+ - block:
+ - include_tasks: remove_links.yml
+ - include_tasks: setup_test.yml
+ # at least two iterations again
+ - include_tasks: tests_set_priority.yml
+ with_sequence: start=3 end=4
+ vars:
+ with_alternatives: true
+ mode: auto
- - block:
- - include_tasks: remove_links.yml
- - include_tasks: setup_test.yml
- # at least two iterations again
- - include_tasks: tests_set_priority.yml
- with_sequence: start=3 end=4
- vars:
- with_alternatives: false
- mode: auto
+ - block:
+ - include_tasks: remove_links.yml
+ - include_tasks: setup_test.yml
+ # at least two iterations again
+ - include_tasks: tests_set_priority.yml
+ with_sequence: start=3 end=4
+ vars:
+ with_alternatives: false
+ mode: auto
- # Test that path is checked: alternatives must fail when path is nonexistent
- - import_tasks: path_is_checked.yml
+ # Test that path is checked: alternatives must fail when path is nonexistent
+ - import_tasks: path_is_checked.yml
- # Test that subcommands commands work
- - import_tasks: subcommands.yml
+ # Test that subcommands commands work
+ - import_tasks: subcommands.yml
- # Test operation of the 'state' parameter
- - block:
- - include_tasks: remove_links.yml
- - include_tasks: tests_state.yml
+ # Test operation of the 'state' parameter
+ - block:
+ - include_tasks: remove_links.yml
+ - include_tasks: tests_state.yml
- # Test for the family parameter
- - block:
- - include_tasks: remove_links.yml
- - include_tasks: tests_family.yml
- when: ansible_os_family == 'RedHat'
+ # Test for the family parameter
+ - block:
+ - include_tasks: remove_links.yml
+ - include_tasks: tests_family.yml
+ when: ansible_os_family == 'RedHat'
# Cleanup
always:
diff --git a/tests/integration/targets/alternatives/tasks/setup.yml b/tests/integration/targets/alternatives/tasks/setup.yml
index ab2c398521..cadee7f7f2 100644
--- a/tests/integration/targets/alternatives/tasks/setup.yml
+++ b/tests/integration/targets/alternatives/tasks/setup.yml
@@ -5,11 +5,11 @@
- include_vars: '{{ item }}'
with_first_found:
- - files:
- - '{{ ansible_os_family }}-{{ ansible_distribution_version }}.yml'
- - '{{ ansible_os_family }}.yml'
- - default.yml
- paths: ../vars
+ - files:
+ - '{{ ansible_os_family }}-{{ ansible_distribution_version }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - default.yml
+ paths: ../vars
- template:
src: dummy_command
dest: /usr/bin/dummy{{ item }}
diff --git a/tests/integration/targets/alternatives/tasks/subcommands.yml b/tests/integration/targets/alternatives/tasks/subcommands.yml
index 678bbe68f4..3c70e6275d 100644
--- a/tests/integration/targets/alternatives/tasks/subcommands.yml
+++ b/tests/integration/targets/alternatives/tasks/subcommands.yml
@@ -89,7 +89,7 @@
assert:
that:
- cmd.rc == 2
- - '"No such file" in cmd.msg'
+ - '"No such file" in cmd.msg or "Error executing command." == cmd.msg'
- name: Get dummymain alternatives output
command:
@@ -172,7 +172,7 @@
assert:
that:
- cmd.rc == 2
- - '"No such file" in cmd.msg'
+ - '"No such file" in cmd.msg or "Error executing command." == cmd.msg'
- name: Get dummymain alternatives output
command:
diff --git a/tests/integration/targets/alternatives/tasks/test.yml b/tests/integration/targets/alternatives/tasks/test.yml
index ca59a4b554..3445f64555 100644
--- a/tests/integration/targets/alternatives/tasks/test.yml
+++ b/tests/integration/targets/alternatives/tasks/test.yml
@@ -7,32 +7,32 @@
msg: ' with_alternatives: {{ with_alternatives }}, mode: {{ mode }}'
- block:
- - name: set alternative (using link parameter)
- alternatives:
- name: dummy
- path: '/usr/bin/dummy{{ item }}'
- link: '/usr/bin/dummy'
- register: alternative
+ - name: set alternative (using link parameter)
+ alternatives:
+ name: dummy
+ path: '/usr/bin/dummy{{ item }}'
+ link: '/usr/bin/dummy'
+ register: alternative
- - name: check expected command was executed
- assert:
- that:
- - 'alternative is successful'
- - 'alternative is changed'
+ - name: check expected command was executed
+ assert:
+ that:
+ - 'alternative is successful'
+ - 'alternative is changed'
when: with_link
- block:
- - name: set alternative (without link parameter)
- alternatives:
- name: dummy
- path: '/usr/bin/dummy{{ item }}'
- register: alternative
+ - name: set alternative (without link parameter)
+ alternatives:
+ name: dummy
+ path: '/usr/bin/dummy{{ item }}'
+ register: alternative
- - name: check expected command was executed
- assert:
- that:
- - 'alternative is successful'
- - 'alternative is changed'
+ - name: check expected command was executed
+ assert:
+ that:
+ - 'alternative is successful'
+ - 'alternative is changed'
when: not with_link
- name: execute dummy command
diff --git a/tests/integration/targets/alternatives/tasks/tests.yml b/tests/integration/targets/alternatives/tasks/tests.yml
index 75e30cabea..63068ede54 100644
--- a/tests/integration/targets/alternatives/tasks/tests.yml
+++ b/tests/integration/targets/alternatives/tasks/tests.yml
@@ -4,13 +4,13 @@
# SPDX-License-Identifier: GPL-3.0-or-later
- block:
- - include_tasks: remove_links.yml
- - include_tasks: setup_test.yml
- # at least two iterations:
- # - first will use 'link currently absent',
- # - second will receive 'link currently points to'
- - include_tasks: test.yml
- with_sequence: start=1 end=2
+ - include_tasks: remove_links.yml
+ - include_tasks: setup_test.yml
+ # at least two iterations:
+ # - first will use 'link currently absent',
+ # - second will receive 'link currently points to'
+ - include_tasks: test.yml
+ with_sequence: start=1 end=2
vars:
with_link: '{{ test_conf[0] }}'
with_alternatives: '{{ test_conf[1] }}'
diff --git a/tests/integration/targets/android_sdk/tasks/main.yml b/tests/integration/targets/android_sdk/tasks/main.yml
index 46cf3192e1..3b49df4056 100644
--- a/tests/integration/targets/android_sdk/tasks/main.yml
+++ b/tests/integration/targets/android_sdk/tasks/main.yml
@@ -11,21 +11,21 @@
# java >= 17 is not available in RHEL and CentOS7 repos, which is required for sdkmanager to run
- name: Bail out if not supported
when:
- - "ansible_os_family == 'RedHat' and ansible_distribution_version is version('8.0', '<')"
+ - "ansible_os_family == 'RedHat' and ansible_distribution_version is version('8.0', '<')"
ansible.builtin.meta: end_play
- name: Run android_sdk tests
environment:
PATH: '{{ ansible_env.PATH }}:{{ android_sdk_location }}/cmdline-tools/latest/bin'
block:
- - import_tasks: setup.yml
+ - import_tasks: setup.yml
- - name: Run default tests
- import_tasks: default-tests.yml
- when: ansible_os_family != 'FreeBSD'
+ - name: Run default tests
+ import_tasks: default-tests.yml
+ when: ansible_os_family != 'FreeBSD'
- # Most of the important Android SDK packages are not available on FreeBSD (like, build-tools, platform-tools and so on),
- # but at least some of the functionality can be tested (like, downloading sources)
- - name: Run FreeBSD tests
- import_tasks: freebsd-tests.yml
- when: ansible_os_family == 'FreeBSD'
+ # Most of the important Android SDK packages are not available on FreeBSD (like, build-tools, platform-tools and so on),
+ # but at least some of the functionality can be tested (like, downloading sources)
+ - name: Run FreeBSD tests
+ import_tasks: freebsd-tests.yml
+ when: ansible_os_family == 'FreeBSD'
diff --git a/tests/integration/targets/android_sdk/tasks/setup.yml b/tests/integration/targets/android_sdk/tasks/setup.yml
index ff2e3eb3cf..9965403367 100644
--- a/tests/integration/targets/android_sdk/tasks/setup.yml
+++ b/tests/integration/targets/android_sdk/tasks/setup.yml
@@ -9,7 +9,16 @@
# SPDX-License-Identifier: GPL-3.0-or-later
- name: Include OS-specific variables
- include_vars: '{{ ansible_os_family }}.yml'
+ include_vars: '{{ lookup("first_found", params) }}'
+ vars:
+ params:
+ files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
+ - '{{ ansible_distribution }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ paths:
+ - '{{ role_path }}/vars'
- name: Install dependencies
become: true
@@ -74,7 +83,7 @@
unarchive:
src: "{{ commandline_tools_link }}"
dest: "{{ android_cmdline_temp_dir }}"
- remote_src: yes
+ remote_src: true
creates: "{{ android_cmdline_temp_dir }}/cmdline-tools"
when: not sdkmanager_installed.stat.exists
@@ -83,4 +92,4 @@
copy:
src: "{{ android_cmdline_temp_dir }}/cmdline-tools/"
dest: "{{ android_sdk_location }}/cmdline-tools/latest"
- remote_src: yes
+ remote_src: true
diff --git a/tests/integration/targets/android_sdk/vars/Fedora.yml b/tests/integration/targets/android_sdk/vars/Fedora.yml
new file mode 100644
index 0000000000..e48443f0b5
--- /dev/null
+++ b/tests/integration/targets/android_sdk/vars/Fedora.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+openjdk_pkg: java-21-openjdk-headless
diff --git a/tests/integration/targets/android_sdk/vars/RedHat-10.yml b/tests/integration/targets/android_sdk/vars/RedHat-10.yml
new file mode 100644
index 0000000000..e48443f0b5
--- /dev/null
+++ b/tests/integration/targets/android_sdk/vars/RedHat-10.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+openjdk_pkg: java-21-openjdk-headless
diff --git a/tests/integration/targets/android_sdk/vars/RedHat-9.yml b/tests/integration/targets/android_sdk/vars/RedHat-9.yml
new file mode 100644
index 0000000000..e48443f0b5
--- /dev/null
+++ b/tests/integration/targets/android_sdk/vars/RedHat-9.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+openjdk_pkg: java-21-openjdk-headless
diff --git a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml
index 5c4af6d167..686422c065 100644
--- a/tests/integration/targets/ansible_galaxy_install/tasks/main.yml
+++ b/tests/integration/targets/ansible_galaxy_install/tasks/main.yml
@@ -19,8 +19,8 @@
- name: Assert collection netbox.netbox was installed
assert:
that:
- - install_c0 is changed
- - '"netbox.netbox" in install_c0.new_collections'
+ - install_c0 is changed
+ - '"netbox.netbox" in install_c0.new_collections'
- name: Install collection netbox.netbox (again)
community.general.ansible_galaxy_install:
@@ -32,7 +32,7 @@
- name: Assert collection was not installed
assert:
that:
- - install_c1 is not changed
+ - install_c1 is not changed
###################################################
- name: Make directory install_r
@@ -50,8 +50,8 @@
- name: Assert collection ansistrano.deploy was installed
assert:
that:
- - install_r0 is changed
- - '"ansistrano.deploy" in install_r0.new_roles'
+ - install_r0 is changed
+ - '"ansistrano.deploy" in install_r0.new_roles'
- name: Install role ansistrano.deploy (again)
community.general.ansible_galaxy_install:
@@ -63,7 +63,7 @@
- name: Assert role was not installed
assert:
that:
- - install_r1 is not changed
+ - install_r1 is not changed
###################################################
- name: Set requirements file path
@@ -85,9 +85,9 @@
- name: Assert requirements file was installed
assert:
that:
- - install_rq0 is changed
- - '"geerlingguy.java" in install_rq0.new_roles'
- - '"geerlingguy.php_roles" in install_rq0.new_collections'
+ - install_rq0 is changed
+ - '"geerlingguy.java" in install_rq0.new_roles'
+ - '"geerlingguy.php_roles" in install_rq0.new_collections'
- name: Install from requirements file (again)
community.general.ansible_galaxy_install:
@@ -99,7 +99,7 @@
- name: Assert requirements file was not installed
assert:
that:
- - install_rq1 is not changed
+ - install_rq1 is not changed
###################################################
- name: Make directory upgrade_c
@@ -117,8 +117,8 @@
- name: Assert collection netbox.netbox was installed
assert:
that:
- - upgrade_c0 is changed
- - '"netbox.netbox" in upgrade_c0.new_collections'
+ - upgrade_c0 is changed
+ - '"netbox.netbox" in upgrade_c0.new_collections'
- name: Upgrade collection netbox.netbox
community.general.ansible_galaxy_install:
@@ -139,5 +139,5 @@
- name: Assert collection was not installed
assert:
that:
- - upgrade_c1 is changed
- - upgrade_c2 is not changed
+ - upgrade_c1 is changed
+ - upgrade_c2 is not changed
diff --git a/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml b/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml
index 5d93a9d300..c7d140b7bc 100644
--- a/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml
+++ b/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml
@@ -15,11 +15,11 @@
- assert:
that:
- - "'warnings' in disable_mpm_modules"
- - disable_mpm_modules["warnings"] == [
- "No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately.",
- "No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately."
- ]
+ - "'warnings' in disable_mpm_modules"
+ - disable_mpm_modules["warnings"] == [
+ "No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately.",
+ "No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately."
+ ]
- name: Enable MPM event module - Revert previous change
apache2_module:
@@ -44,4 +44,4 @@
- assert:
that:
- - "'warnings' not in disable_mpm_modules"
+ - "'warnings' not in disable_mpm_modules"
diff --git a/tests/integration/targets/apache2_module/tasks/actualtest.yml b/tests/integration/targets/apache2_module/tasks/actualtest.yml
index 6fd10ce572..88e063fe92 100644
--- a/tests/integration/targets/apache2_module/tasks/actualtest.yml
+++ b/tests/integration/targets/apache2_module/tasks/actualtest.yml
@@ -67,141 +67,141 @@
- name: Debian/Ubuntu specific tests
when: "ansible_os_family == 'Debian'"
block:
- - name: force disable of autoindex # bug #2499
- community.general.apache2_module:
- name: autoindex
- state: absent
- force: true
-
- - name: re-enable autoindex
- community.general.apache2_module:
- name: autoindex
- state: present
-
- # mod_evasive is enabled by default upon the installation, so disable first and enable second, to preserve the config
- - name: disable evasive module
- community.general.apache2_module:
- name: evasive
- state: absent
-
- - name: enable evasive module, test https://github.com/ansible/ansible/issues/22635
- community.general.apache2_module:
- name: evasive
- state: present
-
- - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669
- community.general.apache2_module:
- name: dump_io
- state: present
- ignore_errors: true
- register: enable_dumpio_wrong
-
- - name: disable dump_io
- community.general.apache2_module:
- name: dump_io
- identifier: dumpio_module
- state: absent
-
- - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669
- community.general.apache2_module:
- name: dump_io
- identifier: dumpio_module
- state: present
- register: enable_dumpio_correct_1
-
- - name: ensure idempotency with identifier
- community.general.apache2_module:
- name: dump_io
- identifier: dumpio_module
- state: present
- register: enable_dumpio_correct_2
-
- - name: disable dump_io
- community.general.apache2_module:
- name: dump_io
- identifier: dumpio_module
- state: absent
-
- - assert:
- that:
- - enable_dumpio_wrong is failed
- - enable_dumpio_correct_1 is changed
- - enable_dumpio_correct_2 is not changed
-
- - name: disable mpm modules
- community.general.apache2_module:
- name: "{{ item }}"
- state: absent
- ignore_configcheck: true
- with_items:
- - mpm_worker
- - mpm_event
- - mpm_prefork
-
- - name: enabled mpm_event
- community.general.apache2_module:
- name: mpm_event
- state: present
- ignore_configcheck: true
- register: enabledmpmevent
-
- - name: ensure changed mpm_event
- assert:
- that:
- - 'enabledmpmevent.changed'
-
- - name: switch between mpm_event and mpm_worker
- community.general.apache2_module:
- name: "{{ item.name }}"
- state: "{{ item.state }}"
- ignore_configcheck: true
- with_items:
- - name: mpm_event
+ - name: force disable of autoindex # bug #2499
+ community.general.apache2_module:
+ name: autoindex
state: absent
- - name: mpm_worker
+ force: true
+
+ - name: re-enable autoindex
+ community.general.apache2_module:
+ name: autoindex
state: present
- - name: ensure mpm_worker is already enabled
- community.general.apache2_module:
- name: mpm_worker
- state: present
- register: enabledmpmworker
+ # mod_evasive is enabled by default upon the installation, so disable first and enable second, to preserve the config
+ - name: disable evasive module
+ community.general.apache2_module:
+ name: evasive
+ state: absent
- - name: ensure mpm_worker unchanged
- assert:
- that:
- - 'not enabledmpmworker.changed'
+ - name: enable evasive module, test https://github.com/ansible/ansible/issues/22635
+ community.general.apache2_module:
+ name: evasive
+ state: present
- - name: try to disable all mpm modules with configcheck
- community.general.apache2_module:
- name: "{{item}}"
- state: absent
- with_items:
- - mpm_worker
- - mpm_event
- - mpm_prefork
- ignore_errors: true
- register: remove_with_configcheck
+ - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669
+ community.general.apache2_module:
+ name: dump_io
+ state: present
+ ignore_errors: true
+ register: enable_dumpio_wrong
- - name: ensure configcheck fails task with when run without mpm modules
- assert:
- that:
- - "{{ item.failed }}"
- with_items: "{{ remove_with_configcheck.results }}"
+ - name: disable dump_io
+ community.general.apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: absent
- - name: try to disable all mpm modules without configcheck
- community.general.apache2_module:
- name: "{{item}}"
- state: absent
- ignore_configcheck: true
- with_items:
- - mpm_worker
- - mpm_event
- - mpm_prefork
+ - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669
+ community.general.apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: present
+ register: enable_dumpio_correct_1
- - name: enabled mpm_event to restore previous state
- community.general.apache2_module:
- name: mpm_event
- state: present
- ignore_configcheck: true
- register: enabledmpmevent
+ - name: ensure idempotency with identifier
+ community.general.apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: present
+ register: enable_dumpio_correct_2
+
+ - name: disable dump_io
+ community.general.apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: absent
+
+ - assert:
+ that:
+ - enable_dumpio_wrong is failed
+ - enable_dumpio_correct_1 is changed
+ - enable_dumpio_correct_2 is not changed
+
+ - name: disable mpm modules
+ community.general.apache2_module:
+ name: "{{ item }}"
+ state: absent
+ ignore_configcheck: true
+ with_items:
+ - mpm_worker
+ - mpm_event
+ - mpm_prefork
+
+ - name: enabled mpm_event
+ community.general.apache2_module:
+ name: mpm_event
+ state: present
+ ignore_configcheck: true
+ register: enabledmpmevent
+
+ - name: ensure changed mpm_event
+ assert:
+ that:
+ - 'enabledmpmevent.changed'
+
+ - name: switch between mpm_event and mpm_worker
+ community.general.apache2_module:
+ name: "{{ item.name }}"
+ state: "{{ item.state }}"
+ ignore_configcheck: true
+ with_items:
+ - name: mpm_event
+ state: absent
+ - name: mpm_worker
+ state: present
+
+ - name: ensure mpm_worker is already enabled
+ community.general.apache2_module:
+ name: mpm_worker
+ state: present
+ register: enabledmpmworker
+
+ - name: ensure mpm_worker unchanged
+ assert:
+ that:
+ - 'not enabledmpmworker.changed'
+
+ - name: try to disable all mpm modules with configcheck
+ community.general.apache2_module:
+ name: "{{item}}"
+ state: absent
+ with_items:
+ - mpm_worker
+ - mpm_event
+ - mpm_prefork
+ ignore_errors: true
+ register: remove_with_configcheck
+
+ - name: ensure configcheck fails task with when run without mpm modules
+ assert:
+ that:
+ - item is failed
+ with_items: "{{ remove_with_configcheck.results }}"
+
+ - name: try to disable all mpm modules without configcheck
+ community.general.apache2_module:
+ name: "{{item}}"
+ state: absent
+ ignore_configcheck: true
+ with_items:
+ - mpm_worker
+ - mpm_event
+ - mpm_prefork
+
+ - name: enabled mpm_event to restore previous state
+ community.general.apache2_module:
+ name: mpm_event
+ state: present
+ ignore_configcheck: true
+ register: enabledmpmevent
diff --git a/tests/integration/targets/apk/tasks/main.yml b/tests/integration/targets/apk/tasks/main.yml
index 0e1b0ae429..b8e0e2efbe 100644
--- a/tests/integration/targets/apk/tasks/main.yml
+++ b/tests/integration/targets/apk/tasks/main.yml
@@ -158,3 +158,64 @@
that:
- results is not changed
- (results.packages | default([]) | length) == 0
+
+ - name: Install package with empty name
+ community.general.apk:
+ name: ""
+ register: result_empty
+ ignore_errors: true
+
+ - name: Assert failure due to empty package name
+ ansible.builtin.assert:
+ that:
+ - result_empty is failed
+ - "'Package name(s) cannot be empty or whitespace-only' == result_empty.msg"
+
+ - name: Install package name with only spaces
+ community.general.apk:
+ name: [" "]
+ register: result_spaces
+ ignore_errors: true
+
+ - name: Assert failure due to whitespace-only package name
+ ansible.builtin.assert:
+ that:
+ - result_spaces is failed
+ - "'Package name(s) cannot be empty or whitespace-only' == result_spaces.msg"
+
+ - name: Do not accept list with valid and empty string
+ community.general.apk:
+ name: ["busybox", ""]
+ register: result_invalid_mixed
+ ignore_errors: true
+
+ - name: Assert failure with mixed package list
+ ansible.builtin.assert:
+ that:
+ - result_invalid_mixed is failed
+ - "'Package name(s) cannot be empty or whitespace-only' == result_invalid_mixed.msg"
+
+ - name: Reject package name list with multiple empty/whitespace-only strings
+ community.general.apk:
+ name: ["", " "]
+ register: result_multiple_empty
+ ignore_errors: true
+
+ - name: Assert failure due to all package names being empty or whitespace
+ ansible.builtin.assert:
+ that:
+ - result_multiple_empty is failed
+ - "'Package name(s) cannot be empty or whitespace-only' == result_multiple_empty.msg"
+
+ - name: Reject empty package name with update_cache parameter
+ community.general.apk:
+ name: ""
+ update_cache: true
+ register: result_empty_package_with_update_cache
+ ignore_errors: true
+
+ - name: Assert failure due to all package names being empty or whitespace
+ ansible.builtin.assert:
+ that:
+ - result_empty_package_with_update_cache is failed
+ - "'Package name(s) cannot be empty or whitespace-only' == result_empty_package_with_update_cache.msg"
diff --git a/tests/integration/targets/btrfs_subvolume/defaults/main.yml b/tests/integration/targets/btrfs_subvolume/defaults/main.yml
index 52c88d5de1..fad7fb401a 100644
--- a/tests/integration/targets/btrfs_subvolume/defaults/main.yml
+++ b/tests/integration/targets/btrfs_subvolume/defaults/main.yml
@@ -4,15 +4,15 @@
# SPDX-License-Identifier: GPL-3.0-or-later
btrfs_subvolume_single_configs:
-- file: "/tmp/disks0.img"
- loop: "/dev/loop95"
+ - file: "/tmp/disks0.img"
+ loop: "/dev/loop95"
btrfs_subvolume_multiple_configs:
-- file: "/tmp/diskm0.img"
- loop: "/dev/loop97"
-- file: "/tmp/diskm1.img"
- loop: "/dev/loop98"
-- file: "/tmp/diskm2.img"
- loop: "/dev/loop99"
+ - file: "/tmp/diskm0.img"
+ loop: "/dev/loop97"
+ - file: "/tmp/diskm1.img"
+ loop: "/dev/loop98"
+ - file: "/tmp/diskm2.img"
+ loop: "/dev/loop99"
btrfs_subvolume_configs: "{{ btrfs_subvolume_single_configs + btrfs_subvolume_multiple_configs }}"
btrfs_subvolume_single_devices: "{{ btrfs_subvolume_single_configs | map(attribute='loop') }}"
btrfs_subvolume_single_label: "single"
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/main.yml b/tests/integration/targets/btrfs_subvolume/tasks/main.yml
index d472704401..f97b6643a8 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/main.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/main.yml
@@ -8,22 +8,22 @@
name:
- btrfs-progs # btrfs userspace
- util-linux # losetup
- ignore_errors: True
+ ignore_errors: true
register: btrfs_installed
- name: Execute integration tests tests
block:
- - ansible.builtin.include_tasks: 'setup.yml'
+ - ansible.builtin.include_tasks: 'setup.yml'
- - name: "Execute test scenario for single device filesystem"
- ansible.builtin.include_tasks: 'run_filesystem_tests.yml'
- vars:
- btrfs_subvolume_target_device: "{{ btrfs_subvolume_single_devices | first }}"
- btrfs_subvolume_target_label: "{{ btrfs_subvolume_single_label }}"
+ - name: "Execute test scenario for single device filesystem"
+ ansible.builtin.include_tasks: 'run_filesystem_tests.yml'
+ vars:
+ btrfs_subvolume_target_device: "{{ btrfs_subvolume_single_devices | first }}"
+ btrfs_subvolume_target_label: "{{ btrfs_subvolume_single_label }}"
- - name: "Execute test scenario for multiple device configuration"
- ansible.builtin.include_tasks: 'run_filesystem_tests.yml'
- vars:
- btrfs_subvolume_target_device: "{{ btrfs_subvolume_multiple_devices | first }}"
- btrfs_subvolume_target_label: "{{ btrfs_subvolume_multiple_label }}"
+ - name: "Execute test scenario for multiple device configuration"
+ ansible.builtin.include_tasks: 'run_filesystem_tests.yml'
+ vars:
+ btrfs_subvolume_target_device: "{{ btrfs_subvolume_multiple_devices | first }}"
+ btrfs_subvolume_target_label: "{{ btrfs_subvolume_multiple_label }}"
when: btrfs_installed is success
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml b/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml
index 0ea3fa6660..137f97a235 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml
@@ -10,23 +10,23 @@
- name: "Execute test scenarios where non-root subvolume is mounted"
block:
- - name: Create subvolume '/nonroot'
- community.general.btrfs_subvolume:
- automount: Yes
- name: "/nonroot"
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- state: "present"
- register: nonroot
- - name: "Mount subvolume '/nonroot'"
- ansible.posix.mount:
- src: "{{ nonroot.filesystem.devices | first }}"
- path: /mnt
- opts: "subvolid={{ nonroot.target_subvolume_id }}"
- fstype: btrfs
- state: mounted
- - name: "Run tests for explicit, mounted single device configuration"
- ansible.builtin.include_tasks: 'run_common_tests.yml'
- - name: "Unmount subvolume /nonroot"
- ansible.posix.mount:
- path: /mnt
- state: absent
+ - name: Create subvolume '/nonroot'
+ community.general.btrfs_subvolume:
+ automount: true
+ name: "/nonroot"
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ state: "present"
+ register: nonroot
+ - name: "Mount subvolume '/nonroot'"
+ ansible.posix.mount:
+ src: "{{ nonroot.filesystem.devices | first }}"
+ path: /mnt
+ opts: "subvolid={{ nonroot.target_subvolume_id }}"
+ fstype: btrfs
+ state: mounted
+ - name: "Run tests for explicit, mounted single device configuration"
+ ansible.builtin.include_tasks: 'run_common_tests.yml'
+ - name: "Unmount subvolume /nonroot"
+ ansible.posix.mount:
+ path: /mnt
+ state: absent
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/setup.yml b/tests/integration/targets/btrfs_subvolume/tasks/setup.yml
index f5bbdf9c54..f5d03a1779 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/setup.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/setup.yml
@@ -18,12 +18,12 @@
- name: Create single device btrfs filesystem
ansible.builtin.command:
cmd: "mkfs.btrfs --label {{ btrfs_subvolume_single_label }} -f {{ btrfs_subvolume_single_devices | first }}"
- changed_when: True
+ changed_when: true
- name: Create multiple device btrfs filesystem
ansible.builtin.command:
cmd: "mkfs.btrfs --label {{ btrfs_subvolume_multiple_label }} -f -d raid0 {{ btrfs_subvolume_multiple_devices | join(' ') }}"
- changed_when: True
+ changed_when: true
# Typically created by udev, but apparently missing on Alpine
- name: Create btrfs control device node
@@ -34,4 +34,4 @@
- name: Force rescan to ensure all device are detected
ansible.builtin.command:
cmd: "btrfs device scan"
- changed_when: True
+ changed_when: true
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml
index 2455eeacf1..b00c033bcf 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml
@@ -5,76 +5,76 @@
- name: "Match targeted filesystem by label"
block:
- - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by label
- community.general.btrfs_subvolume:
- automount: Yes
- name: "/match_label"
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- state: "present"
- register: result
+ - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by label
+ community.general.btrfs_subvolume:
+ automount: true
+ name: "/match_label"
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ state: "present"
+ register: result
- - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
- ansible.builtin.assert:
- that:
- - result.filesystem.label == btrfs_subvolume_target_label
+ - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
+ ansible.builtin.assert:
+ that:
+ - result.filesystem.label == btrfs_subvolume_target_label
- name: "Match targeted filesystem by uuid"
block:
- - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by uuid
- community.general.btrfs_subvolume:
- automount: Yes
- name: "/match_uuid"
- filesystem_uuid: "{{ result.filesystem.uuid }}"
- state: "present"
- register: result
+ - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by uuid
+ community.general.btrfs_subvolume:
+ automount: true
+ name: "/match_uuid"
+ filesystem_uuid: "{{ result.filesystem.uuid }}"
+ state: "present"
+ register: result
- - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
- ansible.builtin.assert:
- that:
- - result.filesystem.label == btrfs_subvolume_target_label
+ - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
+ ansible.builtin.assert:
+ that:
+ - result.filesystem.label == btrfs_subvolume_target_label
- name: "Match targeted filesystem by devices"
block:
- - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by device
- community.general.btrfs_subvolume:
- automount: Yes
- name: "/match_device"
- filesystem_device: "{{ result.filesystem.devices | first }}"
- state: "present"
- register: result
+ - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by device
+ community.general.btrfs_subvolume:
+ automount: true
+ name: "/match_device"
+ filesystem_device: "{{ result.filesystem.devices | first }}"
+ state: "present"
+ register: result
- - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
- ansible.builtin.assert:
- that:
- - result.filesystem.label == btrfs_subvolume_target_label
+ - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
+ ansible.builtin.assert:
+ that:
+ - result.filesystem.label == btrfs_subvolume_target_label
- name: "Match only mounted filesystem"
block:
- - name: "Mount filesystem '{{ btrfs_subvolume_target_label }}'"
- ansible.posix.mount:
- src: "{{ result.filesystem.devices | first }}"
- path: /mnt
- opts: "subvolid={{ 5 }}"
- fstype: btrfs
- state: mounted
+ - name: "Mount filesystem '{{ btrfs_subvolume_target_label }}'"
+ ansible.posix.mount:
+ src: "{{ result.filesystem.devices | first }}"
+ path: /mnt
+ opts: "subvolid={{ 5 }}"
+ fstype: btrfs
+ state: mounted
- - name: Print current status
- community.general.btrfs_info:
+ - name: Print current status
+ community.general.btrfs_info:
- - name: Match '{{ btrfs_subvolume_target_label }}' filesystem when only mount
- community.general.btrfs_subvolume:
- automount: Yes
- name: "/match_only_mounted"
- state: "present"
- register: result
+ - name: Match '{{ btrfs_subvolume_target_label }}' filesystem when only mount
+ community.general.btrfs_subvolume:
+ automount: true
+ name: "/match_only_mounted"
+ state: "present"
+ register: result
- - name: "Unmount filesystem '{{ btrfs_subvolume_target_label }}'"
- ansible.posix.mount:
- path: /mnt
- state: absent
+ - name: "Unmount filesystem '{{ btrfs_subvolume_target_label }}'"
+ ansible.posix.mount:
+ path: /mnt
+ state: absent
- - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
- ansible.builtin.assert:
- that:
- - result.filesystem.label == btrfs_subvolume_target_label
- when: False # TODO don't attempt this if the host already has a pre-existing btrfs filesystem
+ - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
+ ansible.builtin.assert:
+ that:
+ - result.filesystem.label == btrfs_subvolume_target_label
+ when: false # TODO don't attempt this if the host already has a pre-existing btrfs filesystem
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml
index ce25a999ba..f0224b23f1 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml
@@ -5,37 +5,37 @@
- name: Create a snapshot, overwriting if one already exists at path
block:
- - name: Create a snapshot named 'snapshot_clobber'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/snapshot_clobber"
- snapshot_source: "/"
- snapshot_conflict: "clobber"
- state: "present"
- register: result
- - name: Snapshot 'snapshot_clobber' created
- ansible.builtin.assert:
- that:
- - result is changed
+ - name: Create a snapshot named 'snapshot_clobber'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_clobber"
+ snapshot_source: "/"
+ snapshot_conflict: "clobber"
+ state: "present"
+ register: result
+ - name: Snapshot 'snapshot_clobber' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
- - name: Create a snapshot named 'snapshot_clobber' (no idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/snapshot_clobber"
- snapshot_source: "/"
- snapshot_conflict: "clobber"
- state: "present"
- register: result
- - name: Snapshot 'snapshot_clobber' created (no idempotency)
- ansible.builtin.assert:
- that:
- - result is changed
+ - name: Create a snapshot named 'snapshot_clobber' (no idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_clobber"
+ snapshot_source: "/"
+ snapshot_conflict: "clobber"
+ state: "present"
+ register: result
+ - name: Snapshot 'snapshot_clobber' created (no idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is changed
- name: Cleanup created snapshot
community.general.btrfs_subvolume:
- automount: Yes
+ automount: true
filesystem_label: "{{ btrfs_subvolume_target_label }}"
name: "/snapshot_clobber"
state: "absent"
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml
index 49d928b74c..1399acfd4b 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml
@@ -5,38 +5,38 @@
- name: Create a snapshot, erroring if one already exists at path
block:
- - name: Create a snapshot named 'snapshot_error'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/snapshot_error"
- snapshot_source: "/"
- snapshot_conflict: "error"
- state: "present"
- register: result
- - name: Snapshot 'snapshot_error' created
- ansible.builtin.assert:
- that:
- - result is changed
+ - name: Create a snapshot named 'snapshot_error'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_error"
+ snapshot_source: "/"
+ snapshot_conflict: "error"
+ state: "present"
+ register: result
+ - name: Snapshot 'snapshot_error' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
- - name: Create a snapshot named 'snapshot_error' (no idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/snapshot_error"
- snapshot_source: "/"
- snapshot_conflict: "error"
- state: "present"
- register: result
- ignore_errors: true
- - name: Snapshot 'snapshot_error' created (no idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Create a snapshot named 'snapshot_error' (no idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_error"
+ snapshot_source: "/"
+ snapshot_conflict: "error"
+ state: "present"
+ register: result
+ ignore_errors: true
+ - name: Snapshot 'snapshot_error' created (no idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
- name: Cleanup created snapshot
community.general.btrfs_subvolume:
- automount: Yes
+ automount: true
filesystem_label: "{{ btrfs_subvolume_target_label }}"
name: "/snapshot_error"
state: "absent"
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml
index 07e65b133c..33cd46ecce 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml
@@ -5,37 +5,37 @@
- name: Create a snapshot if one does not already exist at path
block:
- - name: Create a snapshot named 'snapshot_skip'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/snapshot_skip"
- snapshot_source: "/"
- snapshot_conflict: "skip"
- state: "present"
- register: result
- - name: Snapshot 'snapshot_skip' created
- ansible.builtin.assert:
- that:
- - result is changed
+ - name: Create a snapshot named 'snapshot_skip'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_skip"
+ snapshot_source: "/"
+ snapshot_conflict: "skip"
+ state: "present"
+ register: result
+ - name: Snapshot 'snapshot_skip' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
- - name: Create a snapshot named 'snapshot_skip' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/snapshot_skip"
- snapshot_source: "/"
- snapshot_conflict: "skip"
- state: "present"
- register: result
- - name: Snapshot 'snapshot_skip' created (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Create a snapshot named 'snapshot_skip' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_skip"
+ snapshot_source: "/"
+ snapshot_conflict: "skip"
+ state: "present"
+ register: result
+ - name: Snapshot 'snapshot_skip' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
- name: Cleanup created snapshot
community.general.btrfs_subvolume:
- automount: Yes
+ automount: true
filesystem_label: "{{ btrfs_subvolume_target_label }}"
name: "/snapshot_skip"
state: "absent"
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml
index f6eed93878..a506d56129 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml
@@ -5,95 +5,95 @@
- name: Change the default subvolume
block:
- - name: Update filesystem default subvolume to '@'
- community.general.btrfs_subvolume:
- automount: Yes
- default: True
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/@"
- state: "present"
- register: result
- - name: Subvolume '@' set to default
- ansible.builtin.assert:
- that:
- - result is changed
- - name: Update filesystem default subvolume to '@' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- default: True
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/@"
- state: "present"
- register: result
- - name: Subvolume '@' set to default (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Update filesystem default subvolume to '@'
+ community.general.btrfs_subvolume:
+ automount: true
+ default: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/@"
+ state: "present"
+ register: result
+ - name: Subvolume '@' set to default
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Update filesystem default subvolume to '@' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ default: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/@"
+ state: "present"
+ register: result
+ - name: Subvolume '@' set to default (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
- name: Revert the default subvolume
block:
- - name: Revert filesystem default subvolume to '/'
- community.general.btrfs_subvolume:
- automount: Yes
- default: True
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/"
- state: "present"
- register: result
- - name: Subvolume '/' set to default
- ansible.builtin.assert:
- that:
- - result is changed
- - name: Revert filesystem default subvolume to '/' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- default: True
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/"
- state: "present"
- register: result
- - name: Subvolume '/' set to default (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Revert filesystem default subvolume to '/'
+ community.general.btrfs_subvolume:
+ automount: true
+ default: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/"
+ state: "present"
+ register: result
+ - name: Subvolume '/' set to default
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Revert filesystem default subvolume to '/' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ default: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/"
+ state: "present"
+ register: result
+ - name: Subvolume '/' set to default (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
- name: Change the default subvolume again
block:
- - name: Update filesystem default subvolume to '@'
- community.general.btrfs_subvolume:
- automount: Yes
- default: True
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/@"
- state: "present"
- register: result
- - name: Subvolume '@' set to default
- ansible.builtin.assert:
- that:
- - result is changed
+ - name: Update filesystem default subvolume to '@'
+ community.general.btrfs_subvolume:
+ automount: true
+ default: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/@"
+ state: "present"
+ register: result
+ - name: Subvolume '@' set to default
+ ansible.builtin.assert:
+ that:
+ - result is changed
- name: Revert custom default subvolume to fs_tree root when deleted
block:
- - name: Delete custom default subvolume '@'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/@"
- state: "absent"
- register: result
- - name: Subvolume '@' deleted
- ansible.builtin.assert:
- that:
- - result is changed
- - name: Delete custom default subvolume '@' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/@"
- state: "absent"
- register: result
- - name: Subvolume '@' deleted (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Delete custom default subvolume '@'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/@"
+ state: "absent"
+ register: result
+ - name: Subvolume '@' deleted
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Delete custom default subvolume '@' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/@"
+ state: "absent"
+ register: result
+ - name: Subvolume '@' deleted (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml
index b706bf72a8..a5c152f9ef 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml
@@ -5,57 +5,57 @@
- name: Create parent subvolume 'container'
community.general.btrfs_subvolume:
- automount: Yes
+ automount: true
filesystem_label: "{{ btrfs_subvolume_target_label }}"
name: "/container"
state: "present"
- name: Create a nested subvolume
block:
- - name: Create a subvolume named 'nested' inside 'container'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/container/nested"
- state: "present"
- register: result
- - name: Subvolume 'container/nested' created
- ansible.builtin.assert:
- that:
- - result is changed
- - name: Create a subvolume named 'nested' inside 'container' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/container/nested"
- state: "present"
- register: result
- - name: Subvolume 'container/nested' created (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Create a subvolume named 'nested' inside 'container'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/nested"
+ state: "present"
+ register: result
+ - name: Subvolume 'container/nested' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Create a subvolume named 'nested' inside 'container' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/nested"
+ state: "present"
+ register: result
+ - name: Subvolume 'container/nested' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
- name: Remove a nested subvolume
block:
- - name: Remove a subvolume named 'nested' inside 'container'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/container/nested"
- state: "absent"
- register: result
- - name: Subvolume 'container/nested' removed
- ansible.builtin.assert:
- that:
- - result is changed
- - name: Remove a subvolume named 'nested' inside 'container' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/container/nested"
- state: "absent"
- register: result
- - name: Subvolume 'container/nested' removed (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Remove a subvolume named 'nested' inside 'container'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/nested"
+ state: "absent"
+ register: result
+ - name: Subvolume 'container/nested' removed
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Remove a subvolume named 'nested' inside 'container' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/nested"
+ state: "absent"
+ register: result
+ - name: Subvolume 'container/nested' removed (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml
index 7e9f990070..a0b86a11ac 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml
@@ -5,82 +5,82 @@
- name: Recursively create subvolumes
block:
- - name: Create a subvolume named '/recursive/son/grandson'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/recursive/son/grandson"
- recursive: Yes
- state: "present"
- register: result
- - name: Subvolume named '/recursive/son/grandson' created
- ansible.builtin.assert:
- that:
- - result is changed
+ - name: Create a subvolume named '/recursive/son/grandson'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive/son/grandson"
+ recursive: true
+ state: "present"
+ register: result
+ - name: Subvolume named '/recursive/son/grandson' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
- - name: Create a subvolume named '/recursive/son/grandson' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/recursive/son/grandson"
- recursive: Yes
- state: "present"
- register: result
- - name: Subvolume named '/recursive/son/grandson' created (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Create a subvolume named '/recursive/son/grandson' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive/son/grandson"
+ recursive: true
+ state: "present"
+ register: result
+ - name: Subvolume named '/recursive/son/grandson' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
- - name: Create a subvolume named '/recursive/daughter/granddaughter'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/recursive/daughter/granddaughter"
- recursive: Yes
- state: "present"
- register: result
- - name: Subvolume named '/recursive/son/grandson' created
- ansible.builtin.assert:
- that:
- - result is changed
+ - name: Create a subvolume named '/recursive/daughter/granddaughter'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive/daughter/granddaughter"
+ recursive: true
+ state: "present"
+ register: result
+ - name: Subvolume named '/recursive/daughter/granddaughter' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
- - name: Create a subvolume named '/recursive/daughter/granddaughter' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/recursive/daughter/granddaughter"
- recursive: Yes
- state: "present"
- register: result
- - name: Subvolume named '/recursive/son/grandson' created (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Create a subvolume named '/recursive/daughter/granddaughter' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive/daughter/granddaughter"
+ recursive: true
+ state: "present"
+ register: result
+ - name: Subvolume named '/recursive/daughter/granddaughter' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
- name: Recursively remove subvolumes
block:
- - name: Remove subvolume '/recursive' and all descendents
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/recursive"
- recursive: Yes
- state: "absent"
- register: result
- - name: Subvolume '/recursive' removed
- ansible.builtin.assert:
- that:
- - result is changed
+ - name: Remove subvolume '/recursive' and all descendents
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive"
+ recursive: true
+ state: "absent"
+ register: result
+ - name: Subvolume '/recursive' removed
+ ansible.builtin.assert:
+ that:
+ - result is changed
- - name: Remove subvolume '/recursive' and all descendents (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/recursive"
- recursive: Yes
- state: "absent"
- register: result
- - name: Subvolume '/recursive' removed (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Remove subvolume '/recursive' and all descendents (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive"
+ recursive: true
+ state: "absent"
+ register: result
+ - name: Subvolume '/recursive' removed (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml
index 6cd214e747..bde385aecd 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml
@@ -5,50 +5,50 @@
- name: Create a simple subvolume
block:
- - name: Create a subvolume named 'simple'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/simple"
- state: "present"
- register: result
- - name: Subvolume named 'simple' created
- ansible.builtin.assert:
- that:
- - result is changed
- - name: Create a subvolume named 'simple' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/simple"
- state: "present"
- register: result
- - name: Subvolume named 'simple' created (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Create a subvolume named 'simple'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/simple"
+ state: "present"
+ register: result
+ - name: Subvolume named 'simple' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Create a subvolume named 'simple' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/simple"
+ state: "present"
+ register: result
+ - name: Subvolume named 'simple' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
- name: Remove a simple subvolume
block:
- - name: Remove a subvolume named 'simple'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/simple"
- state: "absent"
- register: result
- - name: Subvolume named 'simple' removed
- ansible.builtin.assert:
- that:
- - result is changed
- - name: Remove a subvolume named 'simple' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/simple"
- state: "absent"
- register: result
- - name: Subvolume named 'simple' removed (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Remove a subvolume named 'simple'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/simple"
+ state: "absent"
+ register: result
+ - name: Subvolume named 'simple' removed
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Remove a subvolume named 'simple' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/simple"
+ state: "absent"
+ register: result
+ - name: Subvolume named 'simple' removed (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
diff --git a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml
index 6a0147af6c..8fc798108f 100644
--- a/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml
+++ b/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml
@@ -5,58 +5,58 @@
- name: Create a subvolume named 'container'
community.general.btrfs_subvolume:
- automount: Yes
+ automount: true
filesystem_label: "{{ btrfs_subvolume_target_label }}"
name: "/container"
state: "present"
- name: Create a subvolume with whitespace in the name
block:
- - name: Create a subvolume named 'container/my data'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/container/my data"
- state: "present"
- register: result
- - name: Subvolume named 'container/my data' created
- ansible.builtin.assert:
- that:
- - result is changed
- - name: Create a subvolume named 'container/my data' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/container/my data"
- state: "present"
- register: result
- - name: Subvolume named 'container/my data' created (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Create a subvolume named 'container/my data'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/my data"
+ state: "present"
+ register: result
+ - name: Subvolume named 'container/my data' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Create a subvolume named 'container/my data' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/my data"
+ state: "present"
+ register: result
+ - name: Subvolume named 'container/my data' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
- name: Remove a subvolume with whitespace in the name
block:
- - name: Remove a subvolume named 'container/my data'
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/container/my data"
- state: "absent"
- register: result
- - name: Subvolume named 'container/my data' removed
- ansible.builtin.assert:
- that:
- - result is changed
+ - name: Remove a subvolume named 'container/my data'
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/my data"
+ state: "absent"
+ register: result
+ - name: Subvolume named 'container/my data' removed
+ ansible.builtin.assert:
+ that:
+ - result is changed
- - name: Remove a subvolume named 'container/my data' (idempotency)
- community.general.btrfs_subvolume:
- automount: Yes
- filesystem_label: "{{ btrfs_subvolume_target_label }}"
- name: "/container/my data"
- state: "absent"
- register: result
- - name: Subvolume named 'container/my data' removed (idempotency)
- ansible.builtin.assert:
- that:
- - result is not changed
+ - name: Remove a subvolume named 'container/my data' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: true
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/my data"
+ state: "absent"
+ register: result
+ - name: Subvolume named 'container/my data' removed (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
diff --git a/tests/integration/targets/callback/tasks/main.yml b/tests/integration/targets/callback/tasks/main.yml
index 51cef62173..88988f9bf9 100644
--- a/tests/integration/targets/callback/tasks/main.yml
+++ b/tests/integration/targets/callback/tasks/main.yml
@@ -9,59 +9,59 @@
# SPDX-License-Identifier: GPL-3.0-or-later
- block:
- - name: Create temporary playbook files
- tempfile:
- state: file
- suffix: temp
- loop: "{{ tests }}"
- loop_control:
- loop_var: test
- label: "{{ test.name }}"
- register: temporary_playbook_files
+ - name: Create temporary playbook files
+ tempfile:
+ state: file
+ suffix: temp
+ loop: "{{ tests }}"
+ loop_control:
+ loop_var: test
+ label: "{{ test.name }}"
+ register: temporary_playbook_files
- - name: Set temporary playbook file content
- copy:
- content: "{{ test.playbook }}"
- dest: "{{ temporary_playbook_files.results[test_idx].path }}"
- loop: "{{ tests }}"
- loop_control:
- loop_var: test
- index_var: test_idx
- label: "{{ test.name }}"
+ - name: Set temporary playbook file content
+ copy:
+ content: "{{ test.playbook }}"
+ dest: "{{ temporary_playbook_files.results[test_idx].path }}"
+ loop: "{{ tests }}"
+ loop_control:
+ loop_var: test
+ index_var: test_idx
+ label: "{{ test.name }}"
- - name: Collect outputs
- command: "ansible-playbook -i {{ inventory }} {{ playbook }}"
- environment: "{{ test.environment }}"
- loop: "{{ tests }}"
- loop_control:
- loop_var: test
- label: "{{ test.name }}"
- register: outputs
- changed_when: false
- vars:
- inventory: "{{ role_path }}/inventory.yml"
- playbook: "
- {%- for result in temporary_playbook_files.results -%}
- {%- if result.test.name == test.name -%}
- {{- result.path -}}
- {%- endif -%}
- {%- endfor -%}"
+ - name: Collect outputs
+ command: "ansible-playbook -i {{ inventory }} {{ playbook }} {{ test.extra_cli_arguments | default('') }}"
+ environment: "{{ test.environment }}"
+ loop: "{{ tests }}"
+ loop_control:
+ loop_var: test
+ label: "{{ test.name }}"
+ register: outputs
+ changed_when: false
+ vars:
+ inventory: "{{ role_path }}/inventory.yml"
+ playbook: "
+ {%- for result in temporary_playbook_files.results -%}
+ {%- if result.test.name == test.name -%}
+ {{- result.path -}}
+ {%- endif -%}
+ {%- endfor -%}"
- - name: Assert test output equals expected output
- assert:
- that: result.output.differences | length == 0
- loop: "{{ outputs.results | callback_results_extractor }}"
- loop_control:
- loop_var: result
- label: "{{ result.name }}"
- register: assertions
+ - name: Assert test output equals expected output
+ assert:
+ that: result.output.differences | length == 0
+ loop: "{{ outputs.results | callback_results_extractor }}"
+ loop_control:
+ loop_var: result
+ label: "{{ result.name }}"
+ register: assertions
always:
- - name: Remove temporary playbooks
- file:
- path: "{{ temporary_file.path }}"
- state: absent
- loop: "{{ temporary_playbook_files.results }}"
- loop_control:
- loop_var: temporary_file
- label: "{{ temporary_file.test.name }}: {{ temporary_file.path }}"
+ - name: Remove temporary playbooks
+ file:
+ path: "{{ temporary_file.path }}"
+ state: absent
+ loop: "{{ temporary_playbook_files.results }}"
+ loop_control:
+ loop_var: temporary_file
+ label: "{{ temporary_file.test.name }}: {{ temporary_file.path }}"
diff --git a/tests/integration/targets/callback_default_without_diff/tasks/main.yml b/tests/integration/targets/callback_default_without_diff/tasks/main.yml
index 5fc656e847..adb760fd02 100644
--- a/tests/integration/targets/callback_default_without_diff/tasks/main.yml
+++ b/tests/integration/targets/callback_default_without_diff/tasks/main.yml
@@ -41,22 +41,21 @@
content: |
Foo bar
Bar baz bam!
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Gathering Facts] *********************************************************",
- "ok: [testhost]",
- "",
- "TASK [Create file] *************************************************************",
- "changed: [testhost]",
- "",
- "TASK [Modify file] *************************************************************",
- "changed: [testhost]",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=3 changed=2 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ",
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Gathering Facts] *********************************************************"
+ - "ok: [testhost]"
+ - ""
+ - "TASK [Create file] *************************************************************"
+ - "changed: [testhost]"
+ - ""
+ - "TASK [Modify file] *************************************************************"
+ - "changed: [testhost]"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=3 changed=2 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
always:
- name: Clean up temp file
diff --git a/tests/integration/targets/callback_diy/tasks/main.yml b/tests/integration/targets/callback_diy/tasks/main.yml
index fa468b52ba..f1d0c65a5d 100644
--- a/tests/integration/targets/callback_diy/tasks/main.yml
+++ b/tests/integration/targets/callback_diy/tasks/main.yml
@@ -25,18 +25,17 @@
- name: Sample task name
debug:
msg: sample debug msg
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "ok: [testhost] => {",
- " \"msg\": \"sample debug msg\"",
- "}",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "ok: [testhost] => {"
+ - " \"msg\": \"sample debug msg\""
+ - "}"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- name: Set playbook_on_start_msg callback using environment variable
environment:
@@ -51,19 +50,18 @@
- name: Sample task name
debug:
msg: sample debug msg
- expected_output: [
- "Sample output Sample playbook message",
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "ok: [testhost] => {",
- " \"msg\": \"sample debug msg\"",
- "}",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - "Sample output Sample playbook message"
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "ok: [testhost] => {"
+ - " \"msg\": \"sample debug msg\""
+ - "}"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- name: Set playbook_on_play_start_msg callback using play variable
environment:
@@ -80,17 +78,16 @@
- name: Sample task name
debug:
msg: sample debug msg
- expected_output: [
- "Sample output Sample play name",
- "",
- "TASK [Sample task name] ********************************************************",
- "ok: [testhost] => {",
- " \"msg\": \"sample debug msg\"",
- "}",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - "Sample output Sample play name"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "ok: [testhost] => {"
+ - " \"msg\": \"sample debug msg\""
+ - "}"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- name: Set playbook_on_task_start_msg callback using play variable
environment:
@@ -106,17 +103,16 @@
- name: Sample task name
debug:
msg: sample debug msg
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "Sample output Sample task name",
- "ok: [testhost] => {",
- " \"msg\": \"sample debug msg\"",
- "}",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - "Sample output Sample task name"
+ - "ok: [testhost] => {"
+ - " \"msg\": \"sample debug msg\""
+ - "}"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- name: Set playbook_on_task_start_msg callback using task variable
environment:
@@ -132,17 +128,16 @@
msg: sample debug msg
vars:
ansible_callback_diy_playbook_on_task_start_msg: Sample output {{ ansible_callback_diy.task.name }}
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "Sample output Sample task name",
- "ok: [testhost] => {",
- " \"msg\": \"sample debug msg\"",
- "}",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - "Sample output Sample task name"
+ - "ok: [testhost] => {"
+ - " \"msg\": \"sample debug msg\""
+ - "}"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- name: Set runner_on_ok_msg callback using task variable
environment:
@@ -158,16 +153,15 @@
msg: sample debug msg
vars:
ansible_callback_diy_runner_on_ok_msg: Sample output {{ ansible_callback_diy.result.output.msg }}
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "Sample output sample debug msg",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "Sample output sample debug msg"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- name: Set runner_on_failed_msg callback using task variable
environment:
@@ -185,16 +179,15 @@
ignore_errors: true
vars:
ansible_callback_diy_runner_on_failed_msg: Sample output Sample failure message
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "Sample output Sample failure message",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "Sample output Sample failure message"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 "
- name: Set runner_on_skipped_msg callback using task variable
environment:
@@ -211,16 +204,15 @@
when: false
vars:
ansible_callback_diy_runner_on_skipped_msg: Sample output Skipped {{ ansible_callback_diy.task.name }}
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "Sample output Skipped Sample task name",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=0 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "Sample output Skipped Sample task name"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=0 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 "
- name: Set runner_item_on_ok_msg callback using task variable
environment:
@@ -240,18 +232,17 @@
- sample item 3
vars:
ansible_callback_diy_runner_item_on_ok_msg: Sample output Looping {{ ansible_callback_diy.result.output.msg }}
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "Sample output Looping sample debug msg sample item 1",
- "Sample output Looping sample debug msg sample item 2",
- "Sample output Looping sample debug msg sample item 3",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "Sample output Looping sample debug msg sample item 1"
+ - "Sample output Looping sample debug msg sample item 2"
+ - "Sample output Looping sample debug msg sample item 3"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- name: Set runner_item_on_failed_msg callback using task variable
environment:
@@ -273,28 +264,25 @@
ignore_errors: true
vars:
ansible_callback_diy_runner_item_on_failed_msg: Sample output Looping sample failure message
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "ok: [testhost] => (item=sample item 1) => {",
- " \"msg\": \"sample debug msg sample item 1\"",
- "}",
- "Sample output Looping sample failure message",
- "ok: [testhost] => (item=sample item 3) => {",
- " \"msg\": \"sample debug msg sample item 3\"",
- "}",
- [
- # Apparently a bug was fixed in Ansible, as before it ran through with "All items completed"
- "fatal: [testhost]: FAILED! => {\"msg\": \"All items completed\"}",
- "fatal: [testhost]: FAILED! => {\"msg\": \"One or more items failed\"}",
- ],
- "...ignoring",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "ok: [testhost] => (item=sample item 1) => {"
+ - " \"msg\": \"sample debug msg sample item 1\""
+ - "}"
+ - "Sample output Looping sample failure message"
+ - "ok: [testhost] => (item=sample item 3) => {"
+ - " \"msg\": \"sample debug msg sample item 3\""
+ - "}"
+ - # Apparently a bug was fixed in Ansible, as before it ran through with "All items completed"
+ - "fatal: [testhost]: FAILED! => {\"msg\": \"All items completed\"}"
+ - "fatal: [testhost]: FAILED! => {\"msg\": \"One or more items failed\"}"
+ - "...ignoring"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 "
- name: Set runner_item_on_skipped_msg callback using task variable
environment:
@@ -315,22 +303,21 @@
when: item != 'sample item 2'
vars:
ansible_callback_diy_runner_item_on_skipped_msg: Sample output Looping Skipped {{ ansible_callback_diy.result.output.item }}
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "ok: [testhost] => (item=sample item 1) => {",
- " \"msg\": \"sample debug msg sample item 1\"",
- "}",
- "Sample output Looping Skipped sample item 2",
- "ok: [testhost] => (item=sample item 3) => {",
- " \"msg\": \"sample debug msg sample item 3\"",
- "}",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "ok: [testhost] => (item=sample item 1) => {"
+ - " \"msg\": \"sample debug msg sample item 1\""
+ - "}"
+ - "Sample output Looping Skipped sample item 2"
+ - "ok: [testhost] => (item=sample item 3) => {"
+ - " \"msg\": \"sample debug msg sample item 3\""
+ - "}"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- name: Set playbook_on_stats_msg callback using play variable
environment:
@@ -371,20 +358,19 @@
- name: Sample task name
debug:
msg: sample debug msg
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "ok: [testhost] => {",
- " \"msg\": \"sample debug msg\"",
- "}",
- " Sample output stats",
- "===============================",
- " ok : testhost: 1",
- "",
- " processed : testhost: 1"
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "ok: [testhost] => {"
+ - " \"msg\": \"sample debug msg\""
+ - "}"
+ - " Sample output stats"
+ - "==============================="
+ - " ok : testhost: 1"
+ - ""
+ - " processed : testhost: 1"
- name: Suppress output on playbook_on_task_start_msg callback using task variable
environment:
@@ -400,16 +386,15 @@
msg: sample debug msg
vars:
ansible_callback_diy_playbook_on_task_start_msg: ''
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "ok: [testhost] => {",
- " \"msg\": \"sample debug msg\"",
- "}",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - "ok: [testhost] => {"
+ - " \"msg\": \"sample debug msg\""
+ - "}"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- name: Suppress output on runner_on_ok_msg callback using task variable
environment:
@@ -425,15 +410,14 @@
msg: sample debug msg
vars:
ansible_callback_diy_runner_on_ok_msg: ''
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- name: Set runner_on_ok_msg_color using task variable
environment:
@@ -450,13 +434,72 @@
vars:
ansible_callback_diy_runner_on_ok_msg: Sample output {{ ansible_callback_diy.result.output.msg }}
ansible_callback_diy_runner_on_ok_msg_color: blue
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "Sample output sample debug msg",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "Sample output sample debug msg"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+
+ - name: Set on_any_msg
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ ANSIBLE_CALLBACK_DIY_ON_ANY_MSG: foo
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: A loop
+ debug:
+ msg: "{{ item }}"
+ loop:
+ - 1
+ - 2
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ - name: Skipped task
+ command: ls /
+ when: false
+ expected_output:
+ - "foo"
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - "foo"
+ - ""
+ - "TASK [A loop] ******************************************************************"
+ - "foo"
+ - "foo"
+ - "ok: [testhost] => (item=1) => {"
+ - " \"msg\": 1"
+ - "}"
+ - "foo"
+ - "ok: [testhost] => (item=2) => {"
+ - " \"msg\": 2"
+ - "}"
+ - "foo"
+ - "foo"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "foo"
+ - "foo"
+ - "ok: [testhost] => {"
+ - " \"msg\": \"sample debug msg\""
+ - "}"
+ - "foo"
+ - ""
+ - "TASK [Skipped task] ************************************************************"
+ - "foo"
+ - "foo"
+ - "skipping: [testhost]"
+ - "foo"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=2 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 "
+ - ""
+ - "foo"
diff --git a/tests/integration/targets/proxmox_pool/aliases b/tests/integration/targets/callback_print_task/aliases
similarity index 81%
rename from tests/integration/targets/proxmox_pool/aliases
rename to tests/integration/targets/callback_print_task/aliases
index 525dcd332b..3e2dd244c1 100644
--- a/tests/integration/targets/proxmox_pool/aliases
+++ b/tests/integration/targets/callback_print_task/aliases
@@ -2,6 +2,5 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-unsupported
-proxmox_pool
-proxmox_pool_member
+azp/posix/3
+needs/target/callback
diff --git a/tests/integration/targets/callback_print_task/tasks/main.yml b/tests/integration/targets/callback_print_task/tasks/main.yml
new file mode 100644
index 0000000000..0324a9d698
--- /dev/null
+++ b/tests/integration/targets/callback_print_task/tasks/main.yml
@@ -0,0 +1,128 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Run tests
+ include_role:
+ name: callback
+ vars:
+ tests:
+ - name: community.general.print_task is not enabled
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task
+ debug:
+ msg: This is a test
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task] *************************************************************"
+ - "ok: [testhost] => {"
+ - " \"msg\": \"This is a test\""
+ - "}"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+
+ - name: community.general.print_task is enabled
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_CALLBACKS_ENABLED: 'community.general.print_task'
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task
+ debug:
+ msg: This is a test
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task] *************************************************************"
+ - ""
+ - "- name: Sample task"
+ - " debug:"
+ - " msg: This is a test"
+ - ""
+ - "ok: [testhost] => {"
+ - " \"msg\": \"This is a test\""
+ - "}"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+
+ - name: Print with msg parameter on the same line
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_CALLBACKS_ENABLED: 'community.general.print_task'
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task
+ debug: msg="This is a test"
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task] *************************************************************"
+ - ""
+ - "- name: Sample task"
+ - " debug: msg=\"This is a test\""
+ - ""
+ - "ok: [testhost] => {"
+ - " \"msg\": \"This is a test\""
+ - "}"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+
+ - name: Task with additional parameters
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_CALLBACKS_ENABLED: 'community.general.print_task'
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task
+ when: True
+ vars:
+ test_var: "Hello World"
+ debug:
+ var: test_var
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task] *************************************************************"
+ - ""
+ - "- name: Sample task"
+ - " when: true"
+ - " vars:"
+ - " test_var: Hello World"
+ - " debug:"
+ - " var: test_var"
+ - ""
+ - "ok: [testhost] => {"
+ - " \"test_var\": \"Hello World\""
+ - "}"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
diff --git a/tests/integration/targets/proxmox/aliases b/tests/integration/targets/callback_tasks_only/aliases
similarity index 68%
rename from tests/integration/targets/proxmox/aliases
rename to tests/integration/targets/callback_tasks_only/aliases
index 5e5957a5c2..3e2dd244c1 100644
--- a/tests/integration/targets/proxmox/aliases
+++ b/tests/integration/targets/callback_tasks_only/aliases
@@ -2,8 +2,5 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-unsupported
-proxmox_domain_info
-proxmox_group_info
-proxmox_user_info
-proxmox_storage_info
+azp/posix/3
+needs/target/callback
diff --git a/tests/integration/targets/callback_tasks_only/tasks/main.yml b/tests/integration/targets/callback_tasks_only/tasks/main.yml
new file mode 100644
index 0000000000..b02ddc8efc
--- /dev/null
+++ b/tests/integration/targets/callback_tasks_only/tasks/main.yml
@@ -0,0 +1,79 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Create temporary file
+ tempfile:
+ register: tempfile
+
+ - name: Run tests
+ include_role:
+ name: callback
+ vars:
+ tests:
+ - name: Simple test
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_PYTHON_INTERPRETER: "{{ ansible_python_interpreter }}"
+ ANSIBLE_STDOUT_CALLBACK: community.general.tasks_only
+ playbook: |
+ - hosts: testhost
+ gather_facts: true
+ tasks:
+ - name: Create file
+ copy:
+ dest: "{{ tempfile.path }}"
+ content: |
+ Foo bar
+
+ - name: Modify file
+ copy:
+ dest: "{{ tempfile.path }}"
+ content: |
+ Foo bar
+ Bar baz bam!
+ expected_output:
+ - ""
+ - "TASK [Gathering Facts] *********************************************************"
+ - "ok: [testhost]"
+ - ""
+ - "TASK [Create file] *************************************************************"
+ - "changed: [testhost]"
+ - ""
+ - "TASK [Modify file] *************************************************************"
+ - "changed: [testhost]"
+ - name: Different column width
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_PYTHON_INTERPRETER: "{{ ansible_python_interpreter }}"
+ ANSIBLE_STDOUT_CALLBACK: community.general.tasks_only
+        ANSIBLE_COLLECTIONS_TASKS_ONLY_NUMBER_OF_COLUMNS: "40"
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: A task
+ debug:
+ msg: Test.
+ expected_output:
+ - ""
+ - "TASK [A task] ***************************"
+ - "ok: [testhost] => {"
+ - ' "msg": "Test."'
+ - "}"
+
+
+ always:
+ - name: Clean up temp file
+ file:
+ path: "{{ tempfile.path }}"
+ state: absent
diff --git a/tests/integration/targets/callback_timestamp/tasks/main.yml b/tests/integration/targets/callback_timestamp/tasks/main.yml
index 5e0acc15f0..41681a5f42 100644
--- a/tests/integration/targets/callback_timestamp/tasks/main.yml
+++ b/tests/integration/targets/callback_timestamp/tasks/main.yml
@@ -26,18 +26,17 @@
- name: Sample task name
debug:
msg: sample debug msg
- expected_output: [
- "",
- "PLAY [testhost] ******************************************************* 15:04:05",
- "",
- "TASK [Sample task name] *********************************************** 15:04:05",
- "ok: [testhost] => {",
- " \"msg\": \"sample debug msg\"",
- "}",
- "",
- "PLAY RECAP ************************************************************ 15:04:05",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ******************************************************* 15:04:05"
+ - ""
+ - "TASK [Sample task name] *********************************************** 15:04:05"
+ - "ok: [testhost] => {"
+ - " \"msg\": \"sample debug msg\""
+ - "}"
+ - ""
+ - "PLAY RECAP ************************************************************ 15:04:05"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- name: Enable timestamp in the longer length
environment:
@@ -52,15 +51,14 @@
- name: Sample task name
debug:
msg: sample debug msg
- expected_output: [
- "",
- "PLAY [testhost] ******************************************** 2006-01-02T15:04:05",
- "",
- "TASK [Sample task name] ************************************ 2006-01-02T15:04:05",
- "ok: [testhost] => {",
- " \"msg\": \"sample debug msg\"",
- "}",
- "",
- "PLAY RECAP ************************************************* 2006-01-02T15:04:05",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ******************************************** 2006-01-02T15:04:05"
+ - ""
+ - "TASK [Sample task name] ************************************ 2006-01-02T15:04:05"
+ - "ok: [testhost] => {"
+ - " \"msg\": \"sample debug msg\""
+ - "}"
+ - ""
+ - "PLAY RECAP ************************************************* 2006-01-02T15:04:05"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
diff --git a/tests/integration/targets/proxmox_pool/defaults/main.yml b/tests/integration/targets/callback_yaml/meta/main.yml
similarity index 58%
rename from tests/integration/targets/proxmox_pool/defaults/main.yml
rename to tests/integration/targets/callback_yaml/meta/main.yml
index 5a518ac734..982de6eb03 100644
--- a/tests/integration/targets/proxmox_pool/defaults/main.yml
+++ b/tests/integration/targets/callback_yaml/meta/main.yml
@@ -1,7 +1,7 @@
-# Copyright (c) 2023, Sergei Antipov
+---
+# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-poolid: test
-member: local
-member_type: storage
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/tests/integration/targets/callback_yaml/tasks/main.yml b/tests/integration/targets/callback_yaml/tasks/main.yml
index a66892e0f0..8e286e45f4 100644
--- a/tests/integration/targets/callback_yaml/tasks/main.yml
+++ b/tests/integration/targets/callback_yaml/tasks/main.yml
@@ -8,6 +8,11 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
+- name: Write vault password to disk
+ ansible.builtin.copy:
+ dest: "{{ remote_tmp_dir }}/vault-password"
+ content: asdf
+
- name: Run tests
include_role:
name: callback
@@ -25,17 +30,17 @@
- name: Sample task name
debug:
msg: sample debug msg
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Sample task name] ********************************************************",
- "ok: [testhost] => ",
- " msg: sample debug msg",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Sample task name] ********************************************************"
+ - "ok: [testhost] => "
+ - " msg: sample debug msg"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+
- name: Test umlauts in multiline
environment:
ANSIBLE_NOCOLOR: 'true'
@@ -48,20 +53,20 @@
- name: Umlaut output
debug:
msg: "äöü\néêè\nßï☺"
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Umlaut output] ***********************************************************",
- "ok: [testhost] => ",
- " msg: |-",
- " äöü",
- " éêè",
- " ßï☺",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Umlaut output] ***********************************************************"
+ - "ok: [testhost] => "
+ - " msg: |-"
+ - " äöü"
+ - " éêè"
+ - " ßï☺"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+
- name: Test to_yaml
environment:
ANSIBLE_NOCOLOR: 'true'
@@ -79,21 +84,60 @@
- name: Test to_yaml
debug:
msg: "{{ data | to_yaml }}"
- expected_output: [
- "",
- "PLAY [testhost] ****************************************************************",
- "",
- "TASK [Test to_yaml] ************************************************************",
- "ok: [testhost] => ",
- " msg: |-",
- " 'line 1",
- " ",
- " line 2",
- " ",
- " line 3",
- " ",
- " '",
- "",
- "PLAY RECAP *********************************************************************",
- "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
- ]
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Test to_yaml] ************************************************************"
+ - "ok: [testhost] => "
+ - " msg: |-"
+ - " 'line 1"
+ - " "
+ - " line 2"
+ - " "
+ - " line 3"
+ - " "
+ - " '"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ - name: Some more fun with data tagging
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.yaml
+ extra_cli_arguments: "--vault-password-file {{ remote_tmp_dir }}/vault-password"
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ vars:
+ foo: bar
+ baz: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 30393064316433636636373336363538663034643135363938646665393661353833633865313765
+ 3835366434646339313337663335393865336163663434310a316161313662666466333332353731
+ 64663064366461643162666137303737643164376134303034306366383830336232363837636638
+ 3830653338626130360a313639623231353931356563313065373661303262646337383534663932
+ 64353461663065333362346264326335373032313333343539646661656634653138646332313639
+ 3566313765626464613734623664663266336237646139373935
+ tasks:
+ - name: Test regular string
+ debug:
+ var: foo
+ - name: Test vaulted string
+ debug:
+ var: baz
+ expected_output:
+ - ""
+ - "PLAY [testhost] ****************************************************************"
+ - ""
+ - "TASK [Test regular string] *****************************************************"
+ - "ok: [testhost] => "
+ - " foo: bar"
+ - ""
+ - "TASK [Test vaulted string] *****************************************************"
+ - "ok: [testhost] => "
+ - " baz: aBcDeFgHiJkLmNoPqRsTuVwXyZ012345"
+ - ""
+ - "PLAY RECAP *********************************************************************"
+ - "testhost : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
diff --git a/tests/integration/targets/cargo/tasks/main.yml b/tests/integration/targets/cargo/tasks/main.yml
index d9048f77bf..f28b459864 100644
--- a/tests/integration/targets/cargo/tasks/main.yml
+++ b/tests/integration/targets/cargo/tasks/main.yml
@@ -14,9 +14,9 @@
CARGO_NET_GIT_FETCH_WITH_CLI: "true"
when: has_cargo | default(false) and ansible_distribution == 'Alpine'
- block:
- - import_tasks: test_general.yml
- - import_tasks: test_version.yml
- - import_tasks: test_directory.yml
+ - import_tasks: test_general.yml
+ - import_tasks: test_version.yml
+ - import_tasks: test_directory.yml
environment: "{{ cargo_environment }}"
when: has_cargo | default(false)
- import_tasks: test_rustup_cargo.yml
diff --git a/tests/integration/targets/cargo/tasks/setup.yml b/tests/integration/targets/cargo/tasks/setup.yml
index 7eec97ac4c..6a98494106 100644
--- a/tests/integration/targets/cargo/tasks/setup.yml
+++ b/tests/integration/targets/cargo/tasks/setup.yml
@@ -4,12 +4,12 @@
# SPDX-License-Identifier: GPL-3.0-or-later
- block:
- - name: Install cargo
- package:
- name: cargo
- state: present
- - set_fact:
- has_cargo: true
+ - name: Install cargo
+ package:
+ name: cargo
+ state: present
+ - set_fact:
+ has_cargo: true
when:
- ansible_system != 'FreeBSD'
- ansible_distribution != 'MacOSX'
@@ -18,25 +18,25 @@
- ansible_distribution != 'Ubuntu' or ansible_distribution_version is version('18', '>=')
- block:
- - name: Install rust (containing cargo)
- package:
- name: rust
- state: present
- - set_fact:
- has_cargo: true
+ - name: Install rust (containing cargo)
+ package:
+ name: rust
+ state: present
+ - set_fact:
+ has_cargo: true
when:
- ansible_system == 'FreeBSD' and ansible_distribution_version is version('13.0', '>')
- block:
- - name: Download rustup
- get_url:
- url: https://sh.rustup.rs
- dest: /tmp/sh.rustup.rs
- mode: "0750"
- force: true
- - name: Install rustup cargo
- command: /tmp/sh.rustup.rs -y
- - set_fact:
- rustup_cargo_bin: "{{ lookup('env', 'HOME') }}/.cargo/bin/cargo"
+ - name: Download rustup
+ get_url:
+ url: https://sh.rustup.rs
+ dest: /tmp/sh.rustup.rs
+ mode: "0750"
+ force: true
+ - name: Install rustup cargo
+ command: /tmp/sh.rustup.rs -y
+ - set_fact:
+ rustup_cargo_bin: "{{ lookup('env', 'HOME') }}/.cargo/bin/cargo"
when:
- ansible_distribution != 'CentOS' or ansible_distribution_version is version('7.0', '>=')
diff --git a/tests/integration/targets/cargo/tasks/test_directory.yml b/tests/integration/targets/cargo/tasks/test_directory.yml
index f4275ede68..b0b7120388 100644
--- a/tests/integration/targets/cargo/tasks/test_directory.yml
+++ b/tests/integration/targets/cargo/tasks/test_directory.yml
@@ -24,7 +24,7 @@
path: "{{ manifest_path }}"
regexp: '^version = ".*"$'
line: 'version = "1.0.0"'
-
+
- name: Ensure package is uninstalled
community.general.cargo:
name: "{{ package_name }}"
diff --git a/tests/integration/targets/cargo/tasks/test_general.yml b/tests/integration/targets/cargo/tasks/test_general.yml
index 2bffa08f0d..07e96cd4ac 100644
--- a/tests/integration/targets/cargo/tasks/test_general.yml
+++ b/tests/integration/targets/cargo/tasks/test_general.yml
@@ -29,7 +29,7 @@
- name: Check assertions helloworld
assert:
that:
- - uninstall_absent_helloworld is not changed
- - install_absent_helloworld is changed
- - install_present_helloworld is not changed
- - uninstall_present_helloworld is changed
+ - uninstall_absent_helloworld is not changed
+ - install_absent_helloworld is changed
+ - install_present_helloworld is not changed
+ - uninstall_present_helloworld is changed
diff --git a/tests/integration/targets/cargo/tasks/test_rustup_cargo.yml b/tests/integration/targets/cargo/tasks/test_rustup_cargo.yml
index ec2cf6e6de..638dd2600a 100644
--- a/tests/integration/targets/cargo/tasks/test_rustup_cargo.yml
+++ b/tests/integration/targets/cargo/tasks/test_rustup_cargo.yml
@@ -19,5 +19,5 @@
- name: Check assertions helloworld
assert:
that:
- - rustup_install_absent_helloworld is changed
- - rustup_uninstall_present_helloworld is changed
+ - rustup_install_absent_helloworld is changed
+ - rustup_uninstall_present_helloworld is changed
diff --git a/tests/integration/targets/cargo/tasks/test_version.yml b/tests/integration/targets/cargo/tasks/test_version.yml
index c1ab8e198d..701f23e1f5 100644
--- a/tests/integration/targets/cargo/tasks/test_version.yml
+++ b/tests/integration/targets/cargo/tasks/test_version.yml
@@ -42,9 +42,9 @@
- name: Check assertions helloworld-yliu
assert:
that:
- - install_helloworld_010 is changed
- - install_helloworld_010_idem is not changed
- - upgrade_helloworld_010 is changed
- - upgrade_helloworld_010_idem is not changed
- - downgrade_helloworld_010 is changed
- - downgrade_helloworld_010_idem is not changed
+ - install_helloworld_010 is changed
+ - install_helloworld_010_idem is not changed
+ - upgrade_helloworld_010 is changed
+ - upgrade_helloworld_010_idem is not changed
+ - downgrade_helloworld_010 is changed
+ - downgrade_helloworld_010_idem is not changed
diff --git a/tests/integration/targets/cloud_init_data_facts/tasks/main.yml b/tests/integration/targets/cloud_init_data_facts/tasks/main.yml
index 2b67b5c174..71161603f9 100644
--- a/tests/integration/targets/cloud_init_data_facts/tasks/main.yml
+++ b/tests/integration/targets/cloud_init_data_facts/tasks/main.yml
@@ -25,53 +25,53 @@
# Will also have to skip on OpenSUSE when running on Python 2 on newer Leap versions
# (!= 42 and >= 15) as cloud-init will install the Python 3 package, breaking our build on py2.
when:
- - not (ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int == 14)
- - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and ansible_python.version.major != 3)
- - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int == 15)
- - not (ansible_distribution == "CentOS" and ansible_distribution_major_version|int == 8) # TODO: cannot start service
- - not (ansible_distribution == 'Archlinux') # TODO: package seems to be broken, cannot be downloaded from mirrors?
- - not (ansible_distribution == 'Alpine') # TODO: not sure what's wrong here, the module doesn't return what the tests expect
+ - not (ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int == 14)
+ - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and ansible_python.version.major != 3)
+ - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int == 15)
+ - not (ansible_distribution == "CentOS" and ansible_distribution_major_version|int == 8) # TODO: cannot start service
+ - not (ansible_distribution == 'Archlinux') # TODO: package seems to be broken, cannot be downloaded from mirrors?
+ - not (ansible_distribution == 'Alpine') # TODO: not sure what's wrong here, the module doesn't return what the tests expect
block:
- - name: setup install cloud-init
- package:
- name:
- - cloud-init
- - udev
+ - name: setup install cloud-init
+ package:
+ name:
+ - cloud-init
+ - udev
- - name: Ensure systemd-network user exists
- user:
- name: systemd-network
- state: present
- when: ansible_distribution == 'Fedora' and ansible_distribution_major_version|int >= 37
+ - name: Ensure systemd-network user exists
+ user:
+ name: systemd-network
+ state: present
+ when: ansible_distribution == 'Fedora' and ansible_distribution_major_version|int >= 37
- - name: setup run cloud-init
- service:
- name: cloud-init-local
- state: restarted
+ - name: setup run cloud-init
+ service:
+ name: cloud-init-local
+ state: restarted
- - name: test gather cloud-init facts in check mode
- cloud_init_data_facts:
- check_mode: true
- register: result
- - name: verify test gather cloud-init facts in check mode
- assert:
- that:
- - result.cloud_init_data_facts.status.v1 is defined
- - result.cloud_init_data_facts.status.v1.stage is defined
- - not result.cloud_init_data_facts.status.v1.stage
- - cloud_init_data_facts.status.v1 is defined
- - cloud_init_data_facts.status.v1.stage is defined
- - not cloud_init_data_facts.status.v1.stage
+ - name: test gather cloud-init facts in check mode
+ cloud_init_data_facts:
+ check_mode: true
+ register: result
+ - name: verify test gather cloud-init facts in check mode
+ assert:
+ that:
+ - result.cloud_init_data_facts.status.v1 is defined
+ - result.cloud_init_data_facts.status.v1.stage is defined
+ - not result.cloud_init_data_facts.status.v1.stage
+ - cloud_init_data_facts.status.v1 is defined
+ - cloud_init_data_facts.status.v1.stage is defined
+ - not cloud_init_data_facts.status.v1.stage
- - name: test gather cloud-init facts
- cloud_init_data_facts:
- register: result
- - name: verify test gather cloud-init facts
- assert:
- that:
- - result.cloud_init_data_facts.status.v1 is defined
- - result.cloud_init_data_facts.status.v1.stage is defined
- - not result.cloud_init_data_facts.status.v1.stage
- - cloud_init_data_facts.status.v1 is defined
- - cloud_init_data_facts.status.v1.stage is defined
- - not cloud_init_data_facts.status.v1.stage
+ - name: test gather cloud-init facts
+ cloud_init_data_facts:
+ register: result
+ - name: verify test gather cloud-init facts
+ assert:
+ that:
+ - result.cloud_init_data_facts.status.v1 is defined
+ - result.cloud_init_data_facts.status.v1.stage is defined
+ - not result.cloud_init_data_facts.status.v1.stage
+ - cloud_init_data_facts.status.v1 is defined
+ - cloud_init_data_facts.status.v1.stage is defined
+ - not cloud_init_data_facts.status.v1.stage
diff --git a/tests/integration/targets/cmd_runner/vars/main.yml b/tests/integration/targets/cmd_runner/vars/main.yml
index 40c8d10af6..7bce9328ee 100644
--- a/tests/integration/targets/cmd_runner/vars/main.yml
+++ b/tests/integration/targets/cmd_runner/vars/main.yml
@@ -253,3 +253,5 @@ cmd_echo_tests:
assertions:
- >
"No such file or directory" in test_result.msg
+ or
+ "Error executing command." == test_result.msg
diff --git a/tests/integration/targets/connection/test_connection.yml b/tests/integration/targets/connection/test_connection.yml
index bb0a993995..7f8b5697ce 100644
--- a/tests/integration/targets/connection/test_connection.yml
+++ b/tests/integration/targets/connection/test_connection.yml
@@ -8,41 +8,41 @@
serial: 1
tasks:
- ### raw with unicode arg and output
+ ### raw with unicode arg and output
- - name: raw with unicode arg and output
- raw: echo 汉语
- register: command
- - name: check output of raw with unicode arg and output
- assert:
- that:
- - "'汉语' in command.stdout"
- - command is changed # as of 2.2, raw should default to changed: true for consistency w/ shell/command/script modules
+ - name: raw with unicode arg and output
+ raw: echo 汉语
+ register: command
+ - name: check output of raw with unicode arg and output
+ assert:
+ that:
+ - "'汉语' in command.stdout"
+ - command is changed # as of 2.2, raw should default to changed: true for consistency w/ shell/command/script modules
- ### copy local file with unicode filename and content
+ ### copy local file with unicode filename and content
- - name: create local file with unicode filename and content
- local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语
- - name: remove remote file with unicode filename and content
- action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent"
- - name: create remote directory with unicode name
- action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory"
- - name: copy local file with unicode filename and content
- action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt"
+ - name: create local file with unicode filename and content
+ local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语
+ - name: remove remote file with unicode filename and content
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent"
+ - name: create remote directory with unicode name
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory"
+ - name: copy local file with unicode filename and content
+ action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt"
- ### fetch remote file with unicode filename and content
+ ### fetch remote file with unicode filename and content
- - name: remove local file with unicode filename and content
- local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent
- - name: fetch remote file with unicode filename and content
- fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true
+ - name: remove local file with unicode filename and content
+ local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent
+ - name: fetch remote file with unicode filename and content
+ fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true
- ### remove local and remote temp files
+ ### remove local and remote temp files
- - name: remove local temp file
- local_action: file path={{ local_tmp }}-汉语 state=absent
- - name: remove remote temp file
- action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent"
+ - name: remove local temp file
+ local_action: file path={{ local_tmp }}-汉语 state=absent
+ - name: remove remote temp file
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent"
- ### test wait_for_connection plugin
- - ansible.builtin.wait_for_connection:
+ ### test wait_for_connection plugin
+ - ansible.builtin.wait_for_connection:
diff --git a/tests/integration/targets/connection_proxmox_pct_remote/dependencies.yml b/tests/integration/targets/connection_proxmox_pct_remote/dependencies.yml
deleted file mode 100644
index c0a6718e32..0000000000
--- a/tests/integration/targets/connection_proxmox_pct_remote/dependencies.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# Copyright (c) 2025 Nils Stein (@mietzen)
-# Copyright (c) 2025 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- hosts: localhost
- gather_facts: true
- serial: 1
- tasks:
- - name: Copy pct mock
- copy:
- src: files/pct
- dest: /usr/sbin/pct
- mode: '0755'
- - name: Install paramiko
- pip:
- name: "paramiko>=3.0.0"
diff --git a/tests/integration/targets/connection_proxmox_pct_remote/files/pct b/tests/integration/targets/connection_proxmox_pct_remote/files/pct
deleted file mode 100755
index 8a40280041..0000000000
--- a/tests/integration/targets/connection_proxmox_pct_remote/files/pct
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-# Copyright (c) 2025 Nils Stein (@mietzen)
-# Copyright (c) 2025 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# Shell script to mock proxmox pct behaviour
-
->&2 echo "[DEBUG] INPUT: $@"
-
-pwd="$(pwd)"
-
-# Get quoted parts and restore quotes
-declare -a cmd=()
-for arg in "$@"; do
- if [[ $arg =~ [[:space:]] ]]; then
- arg="'$arg'"
- fi
- cmd+=("$arg")
-done
-
-cmd="${cmd[@]:3}"
-vmid="${@:2:1}"
->&2 echo "[INFO] MOCKING: pct ${@:1:3} ${cmd}"
-tmp_dir="/tmp/ansible-remote/proxmox_pct_remote/integration_test/ct_${vmid}"
-mkdir -p "$tmp_dir"
->&2 echo "[INFO] PWD: $tmp_dir"
->&2 echo "[INFO] CMD: ${cmd}"
-cd "$tmp_dir"
-
-eval "${cmd}"
-
-cd "$pwd"
diff --git a/tests/integration/targets/connection_proxmox_pct_remote/plugin-specific-tests.yml b/tests/integration/targets/connection_proxmox_pct_remote/plugin-specific-tests.yml
deleted file mode 100644
index 41fe06cdb9..0000000000
--- a/tests/integration/targets/connection_proxmox_pct_remote/plugin-specific-tests.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- hosts: "{{ target_hosts }}"
- gather_facts: false
- serial: 1
- tasks:
- - name: create file without content
- copy:
- content: ""
- dest: "{{ remote_tmp }}/test_empty.txt"
- force: no
- mode: '0644'
-
- - name: assert file without content exists
- stat:
- path: "{{ remote_tmp }}/test_empty.txt"
- register: empty_file_stat
-
- - name: verify file without content exists
- assert:
- that:
- - empty_file_stat.stat.exists
- fail_msg: "The file {{ remote_tmp }}/test_empty.txt does not exist."
-
- - name: verify file without content is empty
- assert:
- that:
- - empty_file_stat.stat.size == 0
- fail_msg: "The file {{ remote_tmp }}/test_empty.txt is not empty."
diff --git a/tests/integration/targets/connection_proxmox_pct_remote/runme.sh b/tests/integration/targets/connection_proxmox_pct_remote/runme.sh
deleted file mode 100755
index 5d27e243d4..0000000000
--- a/tests/integration/targets/connection_proxmox_pct_remote/runme.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-# Copyright (c) 2025 Nils Stein (@mietzen)
-# Copyright (c) 2025 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-set -eux
-
-ANSIBLE_ROLES_PATH=../ \
- ansible-playbook dependencies.yml -v "$@"
-
-./test.sh "$@"
-
-ansible-playbook plugin-specific-tests.yml -i "./test_connection.inventory" \
- -e target_hosts="proxmox_pct_remote" \
- -e action_prefix= \
- -e local_tmp=/tmp/ansible-local \
- -e remote_tmp=/tmp/ansible-remote \
- "$@"
diff --git a/tests/integration/targets/connection_proxmox_pct_remote/test.sh b/tests/integration/targets/connection_proxmox_pct_remote/test.sh
deleted file mode 120000
index 70aa5dbdba..0000000000
--- a/tests/integration/targets/connection_proxmox_pct_remote/test.sh
+++ /dev/null
@@ -1 +0,0 @@
-../connection_posix/test.sh
\ No newline at end of file
diff --git a/tests/integration/targets/connection_proxmox_pct_remote/test_connection.inventory b/tests/integration/targets/connection_proxmox_pct_remote/test_connection.inventory
deleted file mode 100644
index 15592a61a6..0000000000
--- a/tests/integration/targets/connection_proxmox_pct_remote/test_connection.inventory
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright (c) 2025 Nils Stein (@mietzen)
-# Copyright (c) 2025 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-[proxmox_pct_remote]
-proxmox_pct_remote-pipelining ansible_ssh_pipelining=true
-proxmox_pct_remote-no-pipelining ansible_ssh_pipelining=false
-[proxmox_pct_remote:vars]
-ansible_host=localhost
-ansible_user=root
-ansible_python_interpreter="{{ ansible_playbook_python }}"
-ansible_connection=community.general.proxmox_pct_remote
-proxmox_vmid=123
diff --git a/tests/integration/targets/connection_wsl/plugin-specific-tests.yml b/tests/integration/targets/connection_wsl/plugin-specific-tests.yml
index 228c34dc51..e7d7434b81 100644
--- a/tests/integration/targets/connection_wsl/plugin-specific-tests.yml
+++ b/tests/integration/targets/connection_wsl/plugin-specific-tests.yml
@@ -10,7 +10,7 @@
copy:
content: ""
dest: "{{ remote_tmp }}/test_empty.txt"
- force: no
+ force: false
mode: '0644'
- name: assert file without content exists
diff --git a/tests/integration/targets/consul/tasks/consul_binding_rule.yml b/tests/integration/targets/consul/tasks/consul_binding_rule.yml
index 57f487d146..1ada2a330b 100644
--- a/tests/integration/targets/consul/tasks/consul_binding_rule.yml
+++ b/tests/integration/targets/consul/tasks/consul_binding_rule.yml
@@ -18,7 +18,7 @@
0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg
cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc
mwIDAQAB
- -----END PUBLIC KEY-----
+ -----END PUBLIC KEY-----
- name: Create a binding rule
community.general.consul_binding_rule:
diff --git a/tests/integration/targets/consul/tasks/consul_kv.yml b/tests/integration/targets/consul/tasks/consul_kv.yml
index 6cca73137a..52b95ddd3d 100644
--- a/tests/integration/targets/consul/tasks/consul_kv.yml
+++ b/tests/integration/targets/consul/tasks/consul_kv.yml
@@ -15,10 +15,10 @@
- result is changed
- result.data.Value == 'somevalue'
-#- name: Test the lookup
-# assert:
-# that:
-# - lookup('community.general.consul_kv', 'somekey', token=consul_management_token) == 'somevalue'
+# - name: Test the lookup
+# assert:
+# that:
+# - lookup('community.general.consul_kv', 'somekey', token=consul_management_token) == 'somevalue'
- name: Update a key with the same data
consul_kv:
diff --git a/tests/integration/targets/consul/tasks/consul_role.yml b/tests/integration/targets/consul/tasks/consul_role.yml
index 9b0504e0b6..57193a0b94 100644
--- a/tests/integration/targets/consul/tasks/consul_role.yml
+++ b/tests/integration/targets/consul/tasks/consul_role.yml
@@ -44,7 +44,7 @@
consul_role:
name: foo-role-with-policy
description: "Testing updating description"
- check_mode: yes
+ check_mode: true
register: result
- assert:
@@ -106,7 +106,7 @@
datacenters:
- dc2
register: result
- check_mode: yes
+ check_mode: true
- assert:
that:
@@ -146,7 +146,7 @@
name: role-with-service-identity
node_identities: []
register: result
- check_mode: yes
+ check_mode: true
- assert:
that:
diff --git a/tests/integration/targets/consul/tasks/main.yml b/tests/integration/targets/consul/tasks/main.yml
index 0ac58fc40e..04e2d1b2b5 100644
--- a/tests/integration/targets/consul/tasks/main.yml
+++ b/tests/integration/targets/consul/tasks/main.yml
@@ -14,96 +14,96 @@
consul_uri: https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_{{ ansible_system | lower }}_{{ consul_arch }}.zip
consul_cmd: '{{ remote_tmp_dir }}/consul'
block:
- - name: Install requests<2.20 (CentOS/RHEL 6)
- pip:
- name: requests<2.20
- extra_args: "-c {{ remote_constraints }}"
- register: result
- until: result is success
- when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
- - name: Install python-consul
- pip:
- name: python-consul
- extra_args: "-c {{ remote_constraints }}"
- register: result
- until: result is success
- - name: Generate privatekey
- community.crypto.openssl_privatekey:
- path: '{{ remote_tmp_dir }}/privatekey.pem'
- - name: Generate CSR
- community.crypto.openssl_csr:
- path: '{{ remote_tmp_dir }}/csr.csr'
- privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
- subject:
- commonName: localhost
- - name: Generate selfsigned certificate
- register: selfsigned_certificate
- community.crypto.x509_certificate:
- path: '{{ remote_tmp_dir }}/cert.pem'
- csr_path: '{{ remote_tmp_dir }}/csr.csr'
- privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
- provider: selfsigned
- selfsigned_digest: sha256
- - name: Install unzip
- package:
- name: unzip
- register: result
- until: result is success
- when: ansible_distribution != "MacOSX"
- - assert:
- that: ansible_architecture in ['i386', 'x86_64', 'amd64']
- - set_fact:
- consul_arch: '386'
- when: ansible_architecture == 'i386'
- - set_fact:
- consul_arch: amd64
- when: ansible_architecture in ['x86_64', 'amd64']
- - name: Download consul binary
- unarchive:
- src: '{{ consul_uri }}'
- dest: '{{ remote_tmp_dir }}'
- remote_src: true
- register: result
- until: result is success
- - vars:
- remote_dir: '{{ echo_remote_tmp_dir.stdout }}'
- block:
- - command: echo {{ remote_tmp_dir }}
- register: echo_remote_tmp_dir
- - name: Create configuration file
- template:
- src: consul_config.hcl.j2
- dest: '{{ remote_tmp_dir }}/consul_config.hcl'
- - name: Start Consul (dev mode enabled)
- shell: nohup {{ consul_cmd }} agent -dev -config-file {{ remote_tmp_dir }}/consul_config.hcl /dev/null 2>&1 &
- - name: Bootstrap ACL
- consul_acl_bootstrap:
- register: consul_bootstrap_result
+ - name: Install requests<2.20 (CentOS/RHEL 6)
+ pip:
+ name: requests<2.20
+ extra_args: "-c {{ remote_constraints }}"
+ register: result
+ until: result is success
+ when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
+ - name: Install python-consul
+ pip:
+ name: python-consul
+ extra_args: "-c {{ remote_constraints }}"
+ register: result
+ until: result is success
+ - name: Generate privatekey
+ community.crypto.openssl_privatekey:
+ path: '{{ remote_tmp_dir }}/privatekey.pem'
+ - name: Generate CSR
+ community.crypto.openssl_csr:
+ path: '{{ remote_tmp_dir }}/csr.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
+ subject:
+ commonName: localhost
+ - name: Generate selfsigned certificate
+ register: selfsigned_certificate
+ community.crypto.x509_certificate:
+ path: '{{ remote_tmp_dir }}/cert.pem'
+ csr_path: '{{ remote_tmp_dir }}/csr.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
+ provider: selfsigned
+ selfsigned_digest: sha256
+ - name: Install unzip
+ package:
+ name: unzip
+ register: result
+ until: result is success
+ when: ansible_distribution != "MacOSX"
+ - assert:
+ that: ansible_architecture in ['i386', 'x86_64', 'amd64']
- set_fact:
- consul_management_token: '{{ consul_bootstrap_result.result.SecretID }}'
- - name: Create some data
- command: '{{ consul_cmd }} kv put -token={{consul_management_token}} data/value{{ item }} foo{{ item }}'
- loop:
- - 1
- - 2
- - 3
- - import_tasks: consul_general.yml
- - import_tasks: consul_kv.yml
+ consul_arch: '386'
+ when: ansible_architecture == 'i386'
+ - set_fact:
+ consul_arch: amd64
+ when: ansible_architecture in ['x86_64', 'amd64']
+ - name: Download consul binary
+ unarchive:
+ src: '{{ consul_uri }}'
+ dest: '{{ remote_tmp_dir }}'
+ remote_src: true
+ register: result
+ until: result is success
+ - vars:
+ remote_dir: '{{ echo_remote_tmp_dir.stdout }}'
+ block:
+ - command: echo {{ remote_tmp_dir }}
+ register: echo_remote_tmp_dir
+ - name: Create configuration file
+ template:
+ src: consul_config.hcl.j2
+ dest: '{{ remote_tmp_dir }}/consul_config.hcl'
+ - name: Start Consul (dev mode enabled)
+ shell: nohup {{ consul_cmd }} agent -dev -config-file {{ remote_tmp_dir }}/consul_config.hcl /dev/null 2>&1 &
+ - name: Bootstrap ACL
+ consul_acl_bootstrap:
+ register: consul_bootstrap_result
+ - set_fact:
+ consul_management_token: '{{ consul_bootstrap_result.result.SecretID }}'
+ - name: Create some data
+ command: '{{ consul_cmd }} kv put -token={{consul_management_token}} data/value{{ item }} foo{{ item }}'
+ loop:
+ - 1
+ - 2
+ - 3
+ - import_tasks: consul_general.yml
+ - import_tasks: consul_kv.yml
- - block:
- - import_tasks: consul_session.yml
- - import_tasks: consul_policy.yml
- - import_tasks: consul_role.yml
- - import_tasks: consul_token.yml
- - import_tasks: consul_auth_method.yml
- - import_tasks: consul_binding_rule.yml
- - import_tasks: consul_agent_service.yml
- - import_tasks: consul_agent_check.yml
- module_defaults:
- group/community.general.consul:
- token: "{{ consul_management_token }}"
+ - block:
+ - import_tasks: consul_session.yml
+ - import_tasks: consul_policy.yml
+ - import_tasks: consul_role.yml
+ - import_tasks: consul_token.yml
+ - import_tasks: consul_auth_method.yml
+ - import_tasks: consul_binding_rule.yml
+ - import_tasks: consul_agent_service.yml
+ - import_tasks: consul_agent_check.yml
+ module_defaults:
+ group/community.general.consul:
+ token: "{{ consul_management_token }}"
- always:
+ always:
- name: Kill consul process
shell: kill $(cat {{ remote_tmp_dir }}/consul.pid)
- ignore_errors: true
\ No newline at end of file
+ ignore_errors: true
diff --git a/tests/integration/targets/copr/aliases b/tests/integration/targets/copr/aliases
index ed3c1af00d..d333eac1a1 100644
--- a/tests/integration/targets/copr/aliases
+++ b/tests/integration/targets/copr/aliases
@@ -7,3 +7,4 @@ needs/root
skip/macos
skip/osx
skip/freebsd
+skip/rhel10.0 # FIXME
diff --git a/tests/integration/targets/copr/tasks/main.yml b/tests/integration/targets/copr/tasks/main.yml
index 0d66378112..4fc5ae5c08 100644
--- a/tests/integration/targets/copr/tasks/main.yml
+++ b/tests/integration/targets/copr/tasks/main.yml
@@ -11,133 +11,133 @@
or (ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora'
and ansible_distribution_major_version | int >= 8)
block:
- - debug: var=copr_chroot
- - name: enable copr project
- copr:
- host: copr.fedorainfracloud.org
- state: enabled
- name: '{{ copr_fullname }}'
- chroot: "{{ copr_chroot }}"
- register: result
+ - debug: var=copr_chroot
+ - name: enable copr project
+ copr:
+ host: copr.fedorainfracloud.org
+ state: enabled
+ name: '{{ copr_fullname }}'
+ chroot: "{{ copr_chroot }}"
+ register: result
- - name: assert that the copr project was enabled
- assert:
- that:
- - 'result is changed'
- - result.msg == 'enabled'
- - result.info == 'Please note that this repository is not part of the main distribution'
+ - name: assert that the copr project was enabled
+ assert:
+ that:
+ - 'result is changed'
+ - result.msg == 'enabled'
+ - result.info == 'Please note that this repository is not part of the main distribution'
- - name: enable copr project
- check_mode: true
- copr:
- state: enabled
- name: '{{ copr_fullname }}'
- chroot: '{{ copr_chroot }}'
- register: result
+ - name: enable copr project
+ check_mode: true
+ copr:
+ state: enabled
+ name: '{{ copr_fullname }}'
+ chroot: '{{ copr_chroot }}'
+ register: result
- - name: assert that the copr project was enabled
- assert:
- that:
- - result is not changed
- - result.msg == 'enabled'
+ - name: assert that the copr project was enabled
+ assert:
+ that:
+ - result is not changed
+ - result.msg == 'enabled'
- - name: Ensure the repo is installed and enabled | slurp
- register: result
- ansible.builtin.slurp:
- src: "{{ copr_repofile }}"
+ - name: Ensure the repo is installed and enabled | slurp
+ register: result
+ ansible.builtin.slurp:
+ src: "{{ copr_repofile }}"
- - name: Ensure the repo is installed and enabled
- vars:
- content: "{{ result.content | b64decode }}"
- _baseurl: "{{ 'https://download.copr.fedorainfracloud.org/results/gotmax23/community.general.copr_integration_tests' | regex_escape }}"
- baseurl: "{{ content | regex_search('baseurl=' ~ _baseurl) }}"
- block:
- - ansible.builtin.debug:
- var: content
- - ansible.builtin.debug:
- var: baseurl
- - name: Ensure the repo is installed and enabled
- ansible.builtin.assert:
- that:
- - "'enabled=1' in content"
- - baseurl | length > 0
+ - name: Ensure the repo is installed and enabled
+ vars:
+ content: "{{ result.content | b64decode }}"
+ _baseurl: "{{ 'https://download.copr.fedorainfracloud.org/results/gotmax23/community.general.copr_integration_tests' | regex_escape }}"
+ baseurl: "{{ content | regex_search('baseurl=' ~ _baseurl) }}"
+ block:
+ - ansible.builtin.debug:
+ var: content
+ - ansible.builtin.debug:
+ var: baseurl
+ - name: Ensure the repo is installed and enabled
+ ansible.builtin.assert:
+ that:
+ - "'enabled=1' in content"
+ - baseurl | length > 0
- - name: Install test package from Copr
- when:
- # Copr does not build new packages for EOL Fedoras.
- - >
- not (ansible_distribution == 'Fedora' and
- ansible_distribution_major_version | int < 35)
- block:
- - name: install test package from the copr
- ansible.builtin.package:
- update_cache: true
- name: copr-module-integration-dummy-package
+ - name: Install test package from Copr
+ when:
+ # Copr does not build new packages for EOL Fedoras.
+ - >
+ not (ansible_distribution == 'Fedora' and
+ ansible_distribution_major_version | int < 35)
+ block:
+ - name: install test package from the copr
+ ansible.builtin.package:
+ update_cache: true
+ name: copr-module-integration-dummy-package
- - name: uninstall test package
- register: result
- ansible.builtin.package:
- name: copr-module-integration-dummy-package
- state: absent
+ - name: uninstall test package
+ register: result
+ ansible.builtin.package:
+ name: copr-module-integration-dummy-package
+ state: absent
- - name: check uninstall test package
- ansible.builtin.assert:
- that: result.changed | bool
+ - name: check uninstall test package
+ ansible.builtin.assert:
+ that: result.changed | bool
- - name: remove copr project
- copr:
- state: absent
- name: '{{ copr_fullname }}'
- register: result
+ - name: remove copr project
+ copr:
+ state: absent
+ name: '{{ copr_fullname }}'
+ register: result
- - name: assert that the copr project was removed
- assert:
- that:
- - 'result is changed'
- - result.msg == 'absent'
+ - name: assert that the copr project was removed
+ assert:
+ that:
+ - 'result is changed'
+ - result.msg == 'absent'
- - name: Ensure the repo file was removed | stat
- register: result
- ansible.builtin.stat:
- dest: "{{ copr_repofile }}"
+ - name: Ensure the repo file was removed | stat
+ register: result
+ ansible.builtin.stat:
+ dest: "{{ copr_repofile }}"
- - name: Ensure the repo file was removed
- ansible.builtin.assert:
- that: not result.stat.exists | bool
+ - name: Ensure the repo file was removed
+ ansible.builtin.assert:
+ that: not result.stat.exists | bool
- - name: disable copr project
- copr:
- state: disabled
- name: '{{ copr_fullname }}'
- chroot: '{{ copr_chroot }}'
- register: result
+ - name: disable copr project
+ copr:
+ state: disabled
+ name: '{{ copr_fullname }}'
+ chroot: '{{ copr_chroot }}'
+ register: result
- - name: assert that the copr project was disabled
- assert:
- that:
- - 'result is changed'
- - result.msg == 'disabled'
+ - name: assert that the copr project was disabled
+ assert:
+ that:
+ - 'result is changed'
+ - result.msg == 'disabled'
- - name: Ensure the repo is installed but disabled | slurp
- register: result
- ansible.builtin.slurp:
- src: "{{ copr_repofile }}"
+ - name: Ensure the repo is installed but disabled | slurp
+ register: result
+ ansible.builtin.slurp:
+ src: "{{ copr_repofile }}"
- - name: Ensure the repo is installed but disabled
- vars:
- content: "{{ result.content | b64decode }}"
- _baseurl: "{{ 'https://download.copr.fedorainfracloud.org/results/gotmax23/community.general.copr_integration_tests' | regex_escape }}"
- baseurl: "{{ content | regex_search('baseurl=' ~ _baseurl) }}"
- block:
- - ansible.builtin.debug:
- var: content
- - ansible.builtin.debug:
- var: baseurl
- - name: Ensure the repo is installed but disabled
- ansible.builtin.assert:
- that:
- - "'enabled=0' in content"
- - baseurl | length > 0
+ - name: Ensure the repo is installed but disabled
+ vars:
+ content: "{{ result.content | b64decode }}"
+ _baseurl: "{{ 'https://download.copr.fedorainfracloud.org/results/gotmax23/community.general.copr_integration_tests' | regex_escape }}"
+ baseurl: "{{ content | regex_search('baseurl=' ~ _baseurl) }}"
+ block:
+ - ansible.builtin.debug:
+ var: content
+ - ansible.builtin.debug:
+ var: baseurl
+ - name: Ensure the repo is installed but disabled
+ ansible.builtin.assert:
+ that:
+ - "'enabled=0' in content"
+ - baseurl | length > 0
always:
- name: clean up
diff --git a/tests/integration/targets/cronvar/tasks/main.yml b/tests/integration/targets/cronvar/tasks/main.yml
index 73ec41abca..0d3ae30daf 100644
--- a/tests/integration/targets/cronvar/tasks/main.yml
+++ b/tests/integration/targets/cronvar/tasks/main.yml
@@ -122,3 +122,36 @@
- custom_varcheck1.stdout == '1'
- custom_varcheck2.stdout == '1'
- custom_varcheck3.stdout == '0'
+
+
+- name: Add variable with empty string
+ community.general.cronvar:
+ name: EMPTY_VAR
+ value: ""
+ state: present
+
+- name: Assert empty var present
+ ansible.builtin.shell: crontab -l
+ register: result
+ changed_when: false
+
+- name: Assert line is quoted
+ ansible.builtin.assert:
+ that: >-
+ 'EMPTY_VAR=""' in result.stdout
+
+- name: Attempt to add cron variable to non-existent parent directory
+ cronvar:
+ name: NOPARENT_VAR
+ value: noparentval
+ cron_file: /nonexistent/foo
+ user: root
+ register: invalid_directory_cronvar_result
+ ignore_errors: true
+
+- name: Assert that the cronvar task failed due to invalid directory
+ ansible.builtin.assert:
+ that:
+ - invalid_directory_cronvar_result is failed
+ - >-
+ "Parent directory '/nonexistent' does not exist for cron_file: '/nonexistent/foo'" == invalid_directory_cronvar_result.msg
diff --git a/tests/integration/targets/decompress/tasks/dest.yml b/tests/integration/targets/decompress/tasks/dest.yml
index 9a7bbe499f..4afd39e7b3 100644
--- a/tests/integration/targets/decompress/tasks/dest.yml
+++ b/tests/integration/targets/decompress/tasks/dest.yml
@@ -34,7 +34,8 @@
- name: Test that file exists
assert:
- that: "{{ item.stat.exists }}"
+ that:
+ - item.stat.exists
quiet: true
loop: "{{ result_files_stat.results }}"
loop_control:
diff --git a/tests/integration/targets/deploy_helper/tasks/main.yml b/tests/integration/targets/deploy_helper/tasks/main.yml
index e0fdee83e9..84eb1640bd 100644
--- a/tests/integration/targets/deploy_helper/tasks/main.yml
+++ b/tests/integration/targets/deploy_helper/tasks/main.yml
@@ -16,43 +16,43 @@
- name: Assert State=query with default parameters
assert:
that:
- - "'project_path' in deploy_helper"
- - "deploy_helper.current_path == deploy_helper.project_path ~ '/current'"
- - "deploy_helper.releases_path == deploy_helper.project_path ~ '/releases'"
- - "deploy_helper.shared_path == deploy_helper.project_path ~ '/shared'"
- - "deploy_helper.unfinished_filename == 'DEPLOY_UNFINISHED'"
- - "'previous_release' in deploy_helper"
- - "'previous_release_path' in deploy_helper"
- - "'new_release' in deploy_helper"
- - "'new_release_path' in deploy_helper"
- - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release"
+ - "'project_path' in deploy_helper"
+ - "deploy_helper.current_path == deploy_helper.project_path ~ '/current'"
+ - "deploy_helper.releases_path == deploy_helper.project_path ~ '/releases'"
+ - "deploy_helper.shared_path == deploy_helper.project_path ~ '/shared'"
+ - "deploy_helper.unfinished_filename == 'DEPLOY_UNFINISHED'"
+ - "'previous_release' in deploy_helper"
+ - "'previous_release_path' in deploy_helper"
+ - "'new_release' in deploy_helper"
+ - "'new_release_path' in deploy_helper"
+ - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release"
- name: State=query with relative overridden paths
deploy_helper: path={{ deploy_helper_test_root }} current_path=CURRENT_PATH releases_path=RELEASES_PATH shared_path=SHARED_PATH state=query
- name: Assert State=query with relative overridden paths
assert:
that:
- - "deploy_helper.current_path == deploy_helper.project_path ~ '/CURRENT_PATH'"
- - "deploy_helper.releases_path == deploy_helper.project_path ~ '/RELEASES_PATH'"
- - "deploy_helper.shared_path == deploy_helper.project_path ~ '/SHARED_PATH'"
- - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release"
+ - "deploy_helper.current_path == deploy_helper.project_path ~ '/CURRENT_PATH'"
+ - "deploy_helper.releases_path == deploy_helper.project_path ~ '/RELEASES_PATH'"
+ - "deploy_helper.shared_path == deploy_helper.project_path ~ '/SHARED_PATH'"
+ - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release"
- name: State=query with absolute overridden paths
deploy_helper: path={{ deploy_helper_test_root }} current_path=/CURRENT_PATH releases_path=/RELEASES_PATH shared_path=/SHARED_PATH state=query
- name: Assert State=query with absolute overridden paths
assert:
that:
- - "deploy_helper.current_path == '/CURRENT_PATH'"
- - "deploy_helper.releases_path == '/RELEASES_PATH'"
- - "deploy_helper.shared_path == '/SHARED_PATH'"
- - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release"
+ - "deploy_helper.current_path == '/CURRENT_PATH'"
+ - "deploy_helper.releases_path == '/RELEASES_PATH'"
+ - "deploy_helper.shared_path == '/SHARED_PATH'"
+ - "deploy_helper.new_release_path == deploy_helper.releases_path ~ '/' ~ deploy_helper.new_release"
- name: State=query with overridden unfinished_filename
deploy_helper: path={{ deploy_helper_test_root }} unfinished_filename=UNFINISHED_DEPLOY state=query
- name: Assert State=query with overridden unfinished_filename
assert:
that:
- - "'UNFINISHED_DEPLOY' == deploy_helper.unfinished_filename"
+ - "'UNFINISHED_DEPLOY' == deploy_helper.unfinished_filename"
# Remove the root folder just in case it exists
- file: path={{ deploy_helper_test_root }} state=absent
@@ -66,8 +66,8 @@
- name: Assert State=present with default parameters
assert:
that:
- - "releases_path.stat.exists"
- - "shared_path.stat.exists"
+ - "releases_path.stat.exists"
+ - "shared_path.stat.exists"
# Setup older releases for tests
- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory
@@ -88,9 +88,9 @@
- name: Assert State=finalize with default parameters
assert:
that:
- - "current_path.stat.islnk"
- - "deploy_helper.new_release_path in current_path.stat.lnk_source"
- - "not current_path_unfinished_filename.stat.exists"
+ - "current_path.stat.islnk"
+ - "deploy_helper.new_release_path in current_path.stat.lnk_source"
+ - "not current_path_unfinished_filename.stat.exists"
- stat: path={{ deploy_helper.releases_path }}/third
register: third_release_path
- shell: "ls {{ deploy_helper.releases_path }} | wc -l"
@@ -98,13 +98,13 @@
- name: Assert State=finalize with default parameters (clean=true checks)
assert:
that:
- - "not third_release_path.stat.exists"
- - "releases_count.stdout|trim == '6'"
+ - "not third_release_path.stat.exists"
+ - "releases_count.stdout|trim == '6'"
- deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=query
- name: Assert State=finalize with default parameters (previous_release checks)
assert:
that:
- - "deploy_helper.new_release == deploy_helper.previous_release"
+ - "deploy_helper.new_release == deploy_helper.previous_release"
- name: State=absent with default parameters
deploy_helper: path={{ deploy_helper_test_root }} state=absent
@@ -113,7 +113,7 @@
- name: Assert State=absent with default parameters
assert:
that:
- - "not project_path.stat.exists"
+ - "not project_path.stat.exists"
- debug: msg="Clearing all release data and facts ---------"
@@ -127,8 +127,8 @@
- name: Assert State=present with shared_path set to False
assert:
that:
- - "releases_path.stat.exists"
- - "deploy_helper.shared_path is falsy or not shared_path.stat.exists"
+ - "releases_path.stat.exists"
+ - "deploy_helper.shared_path is falsy or not shared_path.stat.exists"
# Setup older releases for tests
- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory
@@ -151,9 +151,9 @@
- name: Assert State=finalize with default parameters (clean=true checks)
assert:
that:
- - "not third_release_path.stat.exists"
- - "before_releases_count.stdout|trim == '6'"
- - "releases_count.stdout|trim == '3'"
+ - "not third_release_path.stat.exists"
+ - "before_releases_count.stdout|trim == '6'"
+ - "releases_count.stdout|trim == '3'"
# Remove the root folder
- file: path={{ deploy_helper_test_root }} state=absent
diff --git a/tests/integration/targets/discord/defaults/main.yml b/tests/integration/targets/discord/defaults/main.yml
index ef01141ca0..e53245324c 100644
--- a/tests/integration/targets/discord/defaults/main.yml
+++ b/tests/integration/targets/discord/defaults/main.yml
@@ -3,5 +3,5 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-discord_id: 000
+discord_id: 0
discord_token: xxx
diff --git a/tests/integration/targets/django_command/aliases b/tests/integration/targets/django_command/aliases
index aa06ccd7b4..c14251f85e 100644
--- a/tests/integration/targets/django_command/aliases
+++ b/tests/integration/targets/django_command/aliases
@@ -20,3 +20,5 @@ skip/rhel9.2
skip/rhel9.3
skip/rhel9.4
skip/rhel9.5
+skip/rhel9.6
+skip/rhel10.0
diff --git a/tests/integration/targets/django_manage/aliases b/tests/integration/targets/django_manage/aliases
index aa06ccd7b4..c14251f85e 100644
--- a/tests/integration/targets/django_manage/aliases
+++ b/tests/integration/targets/django_manage/aliases
@@ -20,3 +20,5 @@ skip/rhel9.2
skip/rhel9.3
skip/rhel9.4
skip/rhel9.5
+skip/rhel9.6
+skip/rhel10.0
diff --git a/tests/integration/targets/filesize/tasks/basics.yml b/tests/integration/targets/filesize/tasks/basics.yml
index 3c06731899..d4675556a0 100644
--- a/tests/integration/targets/filesize/tasks/basics.yml
+++ b/tests/integration/targets/filesize/tasks/basics.yml
@@ -177,7 +177,6 @@
- filesize_stat_basic_14.stat.checksum == filesize_test_checksum
-
- name: Expand the file with 1 byte (57001B) (check mode)
community.general.filesize:
path: "{{ filesize_testfile }}"
@@ -253,7 +252,6 @@
- filesize_stat_basic_24.stat.checksum != filesize_test_checksum
-
- name: Expand the file up to 2 MiB (2*1024*1024 bytes) (check mode)
community.general.filesize:
path: "{{ filesize_testfile }}"
@@ -327,7 +325,6 @@
- filesize_stat_basic_34.stat.size == 2*1024**2
-
- name: Truncate the file to 57kB (57000B) (check mode)
community.general.filesize:
path: "{{ filesize_testfile }}"
@@ -404,7 +401,6 @@
- filesize_stat_basic_44.stat.checksum == filesize_test_checksum
-
- name: Remove test file
ansible.builtin.file:
path: "{{ filesize_testfile }}"
diff --git a/tests/integration/targets/filesize/tasks/floats.yml b/tests/integration/targets/filesize/tasks/floats.yml
index 6d1bde22c9..9c743f261d 100644
--- a/tests/integration/targets/filesize/tasks/floats.yml
+++ b/tests/integration/targets/filesize/tasks/floats.yml
@@ -89,7 +89,6 @@
- filesize_stat_float_04.stat.size == 512512
-
- name: Create a file with a size of 512.513kB (check mode)
community.general.filesize:
path: "{{ filesize_testfile }}"
@@ -166,7 +165,6 @@
- filesize_stat_float_14.stat.size == 512513
-
- name: Create a file with a size of 4.004MB (check mode)
community.general.filesize:
path: "{{ filesize_testfile }}"
diff --git a/tests/integration/targets/filesize/tasks/sparse.yml b/tests/integration/targets/filesize/tasks/sparse.yml
index 348a1eea1b..7c1b6744b2 100644
--- a/tests/integration/targets/filesize/tasks/sparse.yml
+++ b/tests/integration/targets/filesize/tasks/sparse.yml
@@ -119,7 +119,6 @@
- filesize_stat_sparse_06.stat.size == 2*1000**4
-
- name: Change sparse file size to 2TiB (check mode)
community.general.filesize:
path: "{{ filesize_testfile }}"
@@ -198,7 +197,6 @@
- filesize_stat_sparse_14.stat.size == 2199023255552
-
- name: Change sparse file size to 2.321TB (check mode)
community.general.filesize:
path: "{{ filesize_testfile }}"
@@ -279,7 +277,6 @@
- filesize_stat_sparse_24.stat.size == 2321000000000
-
- name: Remove test file
ansible.builtin.file:
path: "{{ filesize_testfile }}"
diff --git a/tests/integration/targets/filesize/tasks/symlinks.yml b/tests/integration/targets/filesize/tasks/symlinks.yml
index 0118896568..4f65d80c8c 100644
--- a/tests/integration/targets/filesize/tasks/symlinks.yml
+++ b/tests/integration/targets/filesize/tasks/symlinks.yml
@@ -13,14 +13,13 @@
- name: Create a broken symlink in the same directory
ansible.builtin.file:
- src: "{{ filesize_testfile | basename }}"
+ src: "{{ filesize_testfile | basename }}"
dest: "{{ filesize_testlink }}"
state: link
force: true
follow: false
-
- name: Create a file with a size of 512 kB (512000 bytes) (check mode)
community.general.filesize:
path: "{{ filesize_testlink }}"
@@ -85,7 +84,6 @@
- filesize_test_symlink_04.path != filesize_testlink
-
- name: Remove test file
ansible.builtin.file:
path: "{{ filesize_testfile }}"
diff --git a/tests/integration/targets/filesystem/tasks/reset_fs_uuid.yml b/tests/integration/targets/filesystem/tasks/reset_fs_uuid.yml
index 77dad22033..bd9aa607c4 100644
--- a/tests/integration/targets/filesystem/tasks/reset_fs_uuid.yml
+++ b/tests/integration/targets/filesystem/tasks/reset_fs_uuid.yml
@@ -8,52 +8,52 @@
- new_uuid | default(False)
- not (ansible_system == "FreeBSD" and fstype == "xfs")
block:
- - name: "Create filesystem ({{ fstype }})"
- community.general.filesystem:
- dev: '{{ dev }}'
- fstype: '{{ fstype }}'
- register: fs_result
+ - name: "Create filesystem ({{ fstype }})"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ register: fs_result
- - name: "Get UUID of created filesystem"
- ansible.builtin.shell:
- cmd: "{{ get_uuid_cmd }}"
- changed_when: false
- register: uuid
+ - name: "Get UUID of created filesystem"
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: uuid
- - name: "Reset filesystem ({{ fstype }}) UUID"
- community.general.filesystem:
- dev: '{{ dev }}'
- fstype: '{{ fstype }}'
- uuid: "{{ new_uuid }}"
- register: fs_resetuuid_result
-
- - name: "Get UUID of the filesystem"
- ansible.builtin.shell:
- cmd: "{{ get_uuid_cmd }}"
- changed_when: false
- register: uuid2
-
- - name: "Assert that filesystem UUID is changed"
- ansible.builtin.assert:
- that:
- - 'fs_resetuuid_result is changed'
- - 'fs_resetuuid_result is success'
- - 'uuid.stdout != uuid2.stdout'
-
- - when:
- - (grow | bool and (fstype != "vfat" or resize_vfat)) or
- (fstype == "xfs" and ansible_system == "Linux" and
- ansible_distribution not in ["CentOS", "Ubuntu"])
- block:
- - name: "Reset filesystem ({{ fstype }}) UUID and resizefs"
- ignore_errors: true
+ - name: "Reset filesystem ({{ fstype }}) UUID"
community.general.filesystem:
dev: '{{ dev }}'
fstype: '{{ fstype }}'
uuid: "{{ new_uuid }}"
- resizefs: true
- register: fs_resetuuid_and_resizefs_result
+ register: fs_resetuuid_result
- - name: "Assert that filesystem UUID reset and resizefs failed"
+ - name: "Get UUID of the filesystem"
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: uuid2
+
+ - name: "Assert that filesystem UUID is changed"
ansible.builtin.assert:
- that: fs_resetuuid_and_resizefs_result is failed
+ that:
+ - 'fs_resetuuid_result is changed'
+ - 'fs_resetuuid_result is success'
+ - 'uuid.stdout != uuid2.stdout'
+
+ - when:
+ - (grow | bool and (fstype != "vfat" or resize_vfat)) or
+ (fstype == "xfs" and ansible_system == "Linux" and
+ ansible_distribution not in ["CentOS", "Ubuntu"])
+ block:
+ - name: "Reset filesystem ({{ fstype }}) UUID and resizefs"
+ ignore_errors: true
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ uuid: "{{ new_uuid }}"
+ resizefs: true
+ register: fs_resetuuid_and_resizefs_result
+
+ - name: "Assert that filesystem UUID reset and resizefs failed"
+ ansible.builtin.assert:
+ that: fs_resetuuid_and_resizefs_result is failed
diff --git a/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation.yml b/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation.yml
index f52c44d655..9ec45e9e5c 100644
--- a/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation.yml
+++ b/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation.yml
@@ -12,33 +12,33 @@
- new_uuid | default(False)
- not (ansible_system == "FreeBSD" and fstype == "xfs")
block:
- - name: "Create filesystem ({{ fstype }}) with UUID"
- community.general.filesystem:
- dev: '{{ dev }}'
- fstype: '{{ fstype }}'
- uuid: '{{ random_uuid }}'
- register: fs_result
+ - name: "Create filesystem ({{ fstype }}) with UUID"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ uuid: '{{ random_uuid }}'
+ register: fs_result
- - name: "Get UUID of the created filesystem"
- ansible.builtin.shell:
- cmd: "{{ get_uuid_cmd }}"
- changed_when: false
- register: uuid
+ - name: "Get UUID of the created filesystem"
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: uuid
- - name: "Assert that filesystem UUID is the random UUID set on creation"
- ansible.builtin.assert:
- that: (random_uuid | replace('-','')) == ( uuid.stdout | replace('-',''))
+ - name: "Assert that filesystem UUID is the random UUID set on creation"
+ ansible.builtin.assert:
+ that: (random_uuid | replace('-','')) == ( uuid.stdout | replace('-',''))
- when: not (new_uuid | default(False))
block:
- - name: "Create filesystem ({{ fstype }}) without UUID support"
- ignore_errors: true
- community.general.filesystem:
- dev: '{{ dev }}'
- fstype: '{{ fstype }}'
- uuid: '{{ random_uuid }}'
- register: fs_result
+ - name: "Create filesystem ({{ fstype }}) without UUID support"
+ ignore_errors: true
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ uuid: '{{ random_uuid }}'
+ register: fs_result
- - name: "Assert that filesystem creation failed"
- ansible.builtin.assert:
- that: fs_result is failed
+ - name: "Assert that filesystem creation failed"
+ ansible.builtin.assert:
+ that: fs_result is failed
diff --git a/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation_with_opts.yml b/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation_with_opts.yml
index fc73e57ee2..e89668bad0 100644
--- a/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation_with_opts.yml
+++ b/tests/integration/targets/filesystem/tasks/set_fs_uuid_on_creation_with_opts.yml
@@ -9,25 +9,25 @@
- fstype != "xfs"
block:
- - name: "Generate random UUIDs"
- ansible.builtin.set_fact:
- random_uuid: '{{ "first_random_uuid" | ansible.builtin.to_uuid }}'
- random_uuid2: '{{ "second_random_uuid" | ansible.builtin.to_uuid }}'
+ - name: "Generate random UUIDs"
+ ansible.builtin.set_fact:
+ random_uuid: '{{ "first_random_uuid" | ansible.builtin.to_uuid }}'
+ random_uuid2: '{{ "second_random_uuid" | ansible.builtin.to_uuid }}'
- - name: "Create filesystem ({{ fstype }}) with fix UUID as opt"
- community.general.filesystem:
- dev: '{{ dev }}'
- fstype: '{{ fstype }}'
- opts: "{{ ((fstype == 'lvm') | ansible.builtin.ternary('--norestorefile --uuid ', '-U ')) + random_uuid2 }}"
- uuid: '{{ random_uuid }}'
- register: fs_result2
+ - name: "Create filesystem ({{ fstype }}) with fix UUID as opt"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ opts: "{{ ((fstype == 'lvm') | ansible.builtin.ternary('--norestorefile --uuid ', '-U ')) + random_uuid2 }}"
+ uuid: '{{ random_uuid }}'
+ register: fs_result2
- - name: "Get UUID of the created filesystem"
- ansible.builtin.shell:
- cmd: "{{ get_uuid_cmd }}"
- changed_when: false
- register: uuid2
+ - name: "Get UUID of the created filesystem"
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: uuid2
- - name: "Assert that filesystem UUID is the one set on creation with opt"
- ansible.builtin.assert:
- that: (random_uuid2 | replace('-','')) == ( uuid2.stdout | replace('-',''))
+ - name: "Assert that filesystem UUID is the one set on creation with opt"
+ ansible.builtin.assert:
+ that: (random_uuid2 | replace('-','')) == ( uuid2.stdout | replace('-',''))
diff --git a/tests/integration/targets/filter_jc/aliases b/tests/integration/targets/filter_jc/aliases
index 62fbc2daba..978a58095d 100644
--- a/tests/integration/targets/filter_jc/aliases
+++ b/tests/integration/targets/filter_jc/aliases
@@ -8,3 +8,4 @@ skip/freebsd13.3 # FIXME - ruyaml compilation fails
skip/freebsd14.0 # FIXME - ruyaml compilation fails
skip/freebsd14.1 # FIXME - ruyaml compilation fails
skip/freebsd14.2 # FIXME - ruyaml compilation fails
+skip/freebsd14.3 # FIXME - ruyaml compilation fails
diff --git a/tests/integration/targets/filter_json_query/tasks/main.yml b/tests/integration/targets/filter_json_query/tasks/main.yml
index 92db6d876a..0195ddb5dd 100644
--- a/tests/integration/targets/filter_json_query/tasks/main.yml
+++ b/tests/integration/targets/filter_json_query/tasks/main.yml
@@ -11,4 +11,23 @@
- name: Test json_query filter
assert:
that:
- - "users | community.general.json_query('[*].hosts[].host') == ['host_a', 'host_b', 'host_c', 'host_d']"
+ - >-
+ users | community.general.json_query('[*].hosts[].host') == ['host_a', 'host_b', 'host_c', 'host_d']
+ - >-
+ ports | json_query("[?contains(ports, `22`)]") == [ports[0]]
+ - >-
+ ports | json_query("[?contains(rule_desc, `ssh`)]") == [ports[0]]
+ - >-
+ my_complex_data | json_query('users[?id==`1`]') == [my_complex_data['users'][0]]
+ vars:
+ my_complex_data:
+ users:
+ - id: 1
+ name: Alice
+ roles: ["admin", "dev"]
+ status: active
+ ports:
+ - ports: [22]
+ rule_desc: "ssh"
+ - ports: [80]
+ rule_desc: "http"
diff --git a/tests/integration/targets/filter_keep_keys/vars/main/tests.yml b/tests/integration/targets/filter_keep_keys/vars/main/tests.yml
index f1abceddda..c480a675d0 100644
--- a/tests/integration/targets/filter_keep_keys/vars/main/tests.yml
+++ b/tests/integration/targets/filter_keep_keys/vars/main/tests.yml
@@ -11,8 +11,8 @@ tests:
- {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
- {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
result:
- - {k0_x0: A0, k1_x1: B0}
- - {k0_x0: A1, k1_x1: B1}
+ - {k0_x0: A0, k1_x1: B0}
+ - {k0_x0: A1, k1_x1: B1}
- template: mp.j2
group:
- {mp: equal, tt: [k0_x0, k1_x1], d: Match keys that equal any of the items in the target.}
@@ -24,8 +24,8 @@ tests:
- {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
- {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
result:
- - {k0_x0: A0, k1_x1: B0}
- - {k0_x0: A1, k1_x1: B1}
+ - {k0_x0: A0, k1_x1: B0}
+ - {k0_x0: A1, k1_x1: B1}
- template: mp.j2
group:
- {mp: equal, tt: k0_x0, d: Match keys that equal the target.}
@@ -36,5 +36,5 @@ tests:
- {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
- {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
result:
- - {k0_x0: A0}
- - {k0_x0: A1}
+ - {k0_x0: A0}
+ - {k0_x0: A1}
diff --git a/tests/integration/targets/filter_lists_mergeby/tasks/main.yml b/tests/integration/targets/filter_lists_mergeby/tasks/main.yml
index d0bda368cd..f599c2d93e 100644
--- a/tests/integration/targets/filter_lists_mergeby/tasks/main.yml
+++ b/tests/integration/targets/filter_lists_mergeby/tasks/main.yml
@@ -6,6 +6,5 @@
- name: Test list_merge default options
import_tasks: lists_mergeby_default.yml
-- name: Test list_merge non-default options in Ansible 2.10 and higher
+- name: Test list_merge non-default options
import_tasks: lists_mergeby_2-10.yml
- when: ansible_version.full is version('2.10', '>=')
diff --git a/tests/integration/targets/filter_random_mac/tasks/main.yml b/tests/integration/targets/filter_random_mac/tasks/main.yml
index db47011927..583ef498a3 100644
--- a/tests/integration/targets/filter_random_mac/tasks/main.yml
+++ b/tests/integration/targets/filter_random_mac/tasks/main.yml
@@ -45,15 +45,15 @@
- _bad_random_mac_filter is failed
- "_bad_random_mac_filter.msg is search('Invalid value (.*) for random_mac: 5 colon.* separated items max')"
-- name: Verify random_mac filter
+- name: Verify random_mac filter
assert:
that:
- - "'00' | community.general.random_mac is match('^00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
- - "'00:00' | community.general.random_mac is match('^00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
- - "'00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
- - "'00:00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
- - "'00:00:00:00:00' | community.general.random_mac is match('^00:00:00:00:00:[a-f0-9][a-f0-9]$')"
- - "'00:00:00' | community.general.random_mac != '00:00:00' | community.general.random_mac"
+ - "'00' | community.general.random_mac is match('^00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00' | community.general.random_mac is match('^00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00:00:00' | community.general.random_mac is match('^00:00:00:00:00:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00' | community.general.random_mac != '00:00:00' | community.general.random_mac"
- name: Verify random_mac filter with seed
assert:
diff --git a/tests/integration/targets/filter_remove_keys/vars/main/tests.yml b/tests/integration/targets/filter_remove_keys/vars/main/tests.yml
index a4767ea799..45b89ba62d 100644
--- a/tests/integration/targets/filter_remove_keys/vars/main/tests.yml
+++ b/tests/integration/targets/filter_remove_keys/vars/main/tests.yml
@@ -37,4 +37,4 @@ tests:
- {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
result:
- {k1_x1: B0, k2_x2: [C0], k3_x3: foo}
- - {k1_x1: B1, k2_x2: [C1], k3_x3: bar}
+ - {k1_x1: B1, k2_x2: [C1], k3_x3: bar}
diff --git a/tests/integration/targets/filter_replace_keys/vars/main/tests.yml b/tests/integration/targets/filter_replace_keys/vars/main/tests.yml
index ca906a770b..a6e04f3b2e 100644
--- a/tests/integration/targets/filter_replace_keys/vars/main/tests.yml
+++ b/tests/integration/targets/filter_replace_keys/vars/main/tests.yml
@@ -14,8 +14,8 @@ tests:
- {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
- {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
result:
- - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo}
- - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar}
+ - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo}
+ - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar}
- template: mp.j2
group:
- d: Replace keys that starts with any of the attributes before.
@@ -37,8 +37,8 @@ tests:
- {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
- {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
result:
- - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo}
- - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar}
+ - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo}
+ - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar}
- template: mp.j2
group:
- d: If more keys match the same attribute before the last one will be used.
@@ -54,8 +54,8 @@ tests:
- {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo}
- {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar}
result:
- - X: foo
- - X: bar
+ - X: foo
+ - X: bar
- template: mp.j2
group:
- d: If there are more matches for a key the first one will be used.
diff --git a/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml b/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml
index 79b42ff7b2..48a819f62d 100644
--- a/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml
+++ b/tests/integration/targets/filter_reveal_ansible_type/tasks/tasks.yml
@@ -64,7 +64,7 @@
assert:
that: result == dtype
success_msg: '"abc" is {{ dtype }}'
- fail_msg: '"abc" is {{ result }}, not {{ dtype }}'
+ fail_msg: '"abc" is {{ result }}, not {{ dtype }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ "abc" | community.general.reveal_ansible_type }}'
@@ -74,7 +74,7 @@
assert:
that: result == dtype
success_msg: '123 is {{ dtype }}'
- fail_msg: '123 is {{ result }}, not {{ dtype }}'
+ fail_msg: '123 is {{ result }}, not {{ dtype }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ 123 | community.general.reveal_ansible_type }}'
@@ -84,7 +84,7 @@
assert:
that: result == dtype
success_msg: '123.45 is {{ dtype }}'
- fail_msg: '123.45 is {{ result }}, not {{ dtype }}'
+ fail_msg: '123.45 is {{ result }}, not {{ dtype }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ 123.45 | community.general.reveal_ansible_type }}'
@@ -94,7 +94,7 @@
assert:
that: result == dtype
success_msg: 'true is {{ dtype }}'
- fail_msg: 'true is {{ result }}, not {{ dtype }}'
+ fail_msg: 'true is {{ result }}, not {{ dtype }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ true | community.general.reveal_ansible_type }}'
@@ -104,7 +104,7 @@
assert:
that: result == dtype
success_msg: '["a", "b", "c"] is {{ dtype }}'
- fail_msg: '["a", "b", "c"] is {{ result }}, not {{ dtype }}'
+ fail_msg: '["a", "b", "c"] is {{ result }}, not {{ dtype }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}'
@@ -114,7 +114,7 @@
assert:
that: result == dtype
success_msg: '[{"a": 1}, {"b": 2}] is {{ dtype }}'
- fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}, not {{ dtype }}'
+ fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}, not {{ dtype }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}'
@@ -124,7 +124,7 @@
assert:
that: result == dtype
success_msg: '{"a": 1} is {{ dtype }}'
- fail_msg: '{"a": 1} is {{ result }}, not {{ dtype }}'
+ fail_msg: '{"a": 1} is {{ result }}, not {{ dtype }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ {"a": 1} | community.general.reveal_ansible_type }}'
@@ -134,7 +134,7 @@
assert:
that: result == dtype
success_msg: '{"a": 1, "b": 2} is {{ dtype }}'
- fail_msg: '{"a": 1, "b": 2} is {{ result }}, not {{ dtype }}'
+ fail_msg: '{"a": 1, "b": 2} is {{ result }}, not {{ dtype }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}'
@@ -175,7 +175,7 @@
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str", "_AnsibleTaggedInt": "int", "_AnsibleTaggedFloat": "float"}
- data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}}
+ data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': true, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}}
result: '{{ data | community.general.reveal_ansible_type(alias) }}'
dtype: dict[str, bool|dict|float|int|list|str]
@@ -187,6 +187,6 @@
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str", "_AnsibleTaggedInt": "int", "_AnsibleTaggedFloat": "float"}
- data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}]
+ data: [1, 2, 1.1, 'abc', true, ['x', 'y', 'z'], {'x': 1, 'y': 2}]
result: '{{ data | community.general.reveal_ansible_type(alias) }}'
dtype: list[bool|dict|float|int|list|str]
diff --git a/tests/integration/targets/filter_to_prettytable/aliases b/tests/integration/targets/filter_to_prettytable/aliases
new file mode 100644
index 0000000000..afda346c4e
--- /dev/null
+++ b/tests/integration/targets/filter_to_prettytable/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
diff --git a/tests/integration/targets/filter_to_prettytable/tasks/main.yml b/tests/integration/targets/filter_to_prettytable/tasks/main.yml
new file mode 100644
index 0000000000..95d4118e27
--- /dev/null
+++ b/tests/integration/targets/filter_to_prettytable/tasks/main.yml
@@ -0,0 +1,658 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2025, Timur Gadiev (tgadiev@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name: prettytable
+ state: present
+ delegate_to: localhost
+ become: false
+
+- name: Set test data
+ set_fact:
+ test_data:
+ - name: Alice
+ age: 25
+ role: admin
+ - name: Bob
+ age: 30
+ role: user
+ data_for_align:
+ - date: 2023-01-01
+ description: Office supplies
+ amount: 123.45
+
+# Test basic functionality
+- name: Test basic table creation
+ set_fact:
+ basic_table: '{{ test_data | community.general.to_prettytable }}'
+ expected_basic_table: |-
+ +-------+-----+-------+
+ | name | age | role |
+ +-------+-----+-------+
+ | Alice | 25 | admin |
+ | Bob | 30 | user |
+ +-------+-----+-------+
+
+- name: Verify basic table output
+ assert:
+ that:
+ - basic_table == expected_basic_table
+
+# Test column ordering
+- name: Test column ordering
+ set_fact:
+ ordered_table: "{{ test_data | community.general.to_prettytable(column_order=['role', 'name', 'age']) }}"
+ expected_ordered_table: |-
+ +-------+-------+-----+
+ | role | name | age |
+ +-------+-------+-----+
+ | admin | Alice | 25 |
+ | user | Bob | 30 |
+ +-------+-------+-----+
+
+- name: Verify ordered table output
+ assert:
+ that:
+ - ordered_table == expected_ordered_table
+
+# Test selective column ordering (subset of keys)
+- name: Test selective column ordering
+ set_fact:
+ selective_ordered_table: "{{ test_data | community.general.to_prettytable(column_order=['name', 'role']) }}"
+ expected_selective_table: |-
+ +-------+-------+
+ | name | role |
+ +-------+-------+
+ | Alice | admin |
+ | Bob | user |
+ +-------+-------+
+
+- name: Verify selective column ordering
+ assert:
+ that:
+ - selective_ordered_table == expected_selective_table
+
+# Test custom headers
+- name: Test custom headers
+ set_fact:
+ headers_table: "{{ test_data | community.general.to_prettytable(header_names=['User Name', 'User Age', 'User Role']) }}"
+ expected_headers_table: |-
+ +-----------+----------+-----------+
+ | User Name | User Age | User Role |
+ +-----------+----------+-----------+
+ | Alice | 25 | admin |
+ | Bob | 30 | user |
+ +-----------+----------+-----------+
+
+- name: Verify custom headers output
+ assert:
+ that:
+ - headers_table == expected_headers_table
+
+# Test selective column ordering with custom headers (subset of keys)
+- name: Test selective column ordering with custom headers
+ set_fact:
+ selective_ordered_headers_table: "{{ test_data | community.general.to_prettytable(column_order=['name', 'role'], header_names=['User Name', 'User Role']) }}"
+ expected_selective_headers_table: |-
+ +-----------+-----------+
+ | User Name | User Role |
+ +-----------+-----------+
+ | Alice | admin |
+ | Bob | user |
+ +-----------+-----------+
+
+- name: Verify selective column ordering with custom headers
+ assert:
+ that:
+ - selective_ordered_headers_table == expected_selective_headers_table
+
+# Test alignments
+- name: Test column alignments
+ set_fact:
+ aligned_table: "{{ data_for_align | community.general.to_prettytable(column_alignments={'amount': 'right', 'description': 'left', 'date': 'center'}) }}"
+ expected_aligned_table: |-
+ +------------+-----------------+--------+
+ | date | description | amount |
+ +------------+-----------------+--------+
+ | 2023-01-01 | Office supplies | 123.45 |
+ +------------+-----------------+--------+
+
+- name: Verify aligned table output
+ assert:
+ that:
+ - aligned_table == expected_aligned_table
+
+# Test combined options
+- name: Test combined options
+ set_fact:
+ combined_table: "{{ test_data | community.general.to_prettytable(
+ column_order=['role', 'name', 'age'],
+ header_names=['Position', 'Full Name', 'Years'],
+ column_alignments={'role': 'left', 'name': 'center', 'age': 'right'}) }}"
+ expected_combined_table: |-
+ +----------+-----------+-------+
+ | Position | Full Name | Years |
+ +----------+-----------+-------+
+ | admin | Alice | 25 |
+ | user | Bob | 30 |
+ +----------+-----------+-------+
+
+- name: Verify combined table output
+ assert:
+ that:
+ - combined_table == expected_combined_table
+
+# Test empty data
+- name: Test empty data list with no parameters
+ set_fact:
+ empty_table: "{{ [] | community.general.to_prettytable }}"
+ expected_empty_table: |-
+ ++
+ ++
+
+- name: Verify empty table output
+ assert:
+ that:
+ - empty_table == expected_empty_table
+
+# Test empty data with column_order
+- name: Test empty data list with column_order
+ set_fact:
+ empty_with_columns: "{{ [] | community.general.to_prettytable(column_order=['name', 'age', 'role']) }}"
+ expected_empty_with_columns: |-
+ +------+-----+------+
+ | name | age | role |
+ +------+-----+------+
+ +------+-----+------+
+
+- name: Verify empty table with column_order
+ assert:
+ that:
+ - empty_with_columns == expected_empty_with_columns
+
+# Test empty data with header_names
+- name: Test empty data list with header_names
+ set_fact:
+ empty_with_headers: "{{ [] | community.general.to_prettytable(header_names=['User Name', 'User Age', 'User Role']) }}"
+ expected_empty_with_headers: |-
+ +-----------+----------+-----------+
+ | User Name | User Age | User Role |
+ +-----------+----------+-----------+
+ +-----------+----------+-----------+
+
+- name: Verify empty table with header_names
+ assert:
+ that:
+ - empty_with_headers == expected_empty_with_headers
+
+# Test empty data with combined parameters
+- name: Test empty data with combined parameters
+ set_fact:
+ empty_combined: "{{ [] | community.general.to_prettytable(
+ column_order=['role', 'name', 'age'],
+ header_names=['Position', 'Full Name', 'Years'],
+ column_alignments={'role': 'left', 'name': 'center', 'age': 'right'}) }}"
+ expected_empty_combined: |-
+ +----------+-----------+-------+
+ | Position | Full Name | Years |
+ +----------+-----------+-------+
+ +----------+-----------+-------+
+
+- name: Verify empty table with combined parameters
+ assert:
+ that:
+ - empty_combined == expected_empty_combined
+
+# Test validation with empty data
+- name: Test empty data with non-list column_order (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ [] | community.general.to_prettytable(column_order=123) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for empty data with invalid column_order
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a list of column names, got a int" in failure_result.msg
+
+- name: Test empty data with non-list header_names (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ [] | community.general.to_prettytable(header_names='invalid_headers') }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for empty data with invalid header_names
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a list of header names, got a string" in failure_result.msg
+
+- name: Test empty data with non-dictionary column_alignments (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ [] | community.general.to_prettytable(column_alignments='invalid') }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for empty data with invalid column_alignments
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a dictionary for column_alignments, got a string" in failure_result.msg
+
+- name: Test empty data with non-string values in column_alignments (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ [] | community.general.to_prettytable(column_alignments={'name': 123}) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for empty data with non-string values in column_alignments
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a string for column_alignments value, got a int" in failure_result.msg
+
+- name: Test empty data with invalid alignment value in column_alignments (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ [] | community.general.to_prettytable(column_alignments={'name': 'invalid'}) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for empty data with invalid alignment value
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Invalid alignment 'invalid' in 'column_alignments'" in failure_result.msg
+ - >
+ "Valid alignments are" in failure_result.msg
+
+- name: Test empty data with mismatched column_order and header_names (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ [] | community.general.to_prettytable(column_order=['a', 'b', 'c'], header_names=['X', 'Y']) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for empty data with mismatched lengths
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "'column_order' and 'header_names' must have the same number of elements" in failure_result.msg
+
+# Test error conditions
+- name: Test non-list input (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ 'not_a_list' | community.general.to_prettytable }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for non-list input
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a list of dictionaries, got a string" in failure_result.msg
+
+- name: Test list with non-dictionary items (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ ['not_a_dict', 'also_not_a_dict'] | community.general.to_prettytable }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for non-dictionary items
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected all items in the list to be dictionaries, got a string" in failure_result.msg
+
+- name: Test non-list column_order (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(column_order=123) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for non-list column_order
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a list of column names, got a int" in failure_result.msg
+
+- name: Test non-list header_names (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(header_names='invalid_headers') }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for non-list header_names
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a list of header names, got a string" in failure_result.msg
+
+- name: Test unknown parameters (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(unknown_param='value') }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for unknown parameters
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Unknown parameter(s) for to_prettytable filter: unknown_param" in failure_result.msg
+
+- name: Test both positional args and column_order (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable('role', 'name', column_order=['name', 'age', 'role']) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for using both positional args and column_order
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Cannot use both positional arguments and the 'column_order' keyword argument" in failure_result.msg
+
+- name: Test non-string values in positional args (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable('name', 123, 'role') }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for non-string values in positional args
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a string for column name, got a int" in failure_result.msg
+
+- name: Test non-string values in column_order (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(column_order=['name', 123, 'role']) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for non-string values in column_order
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a string for column name, got a int" in failure_result.msg
+
+- name: Test non-string values in header_names (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(header_names=['User Name', 456, 'User Role']) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for non-string values in header_names
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a string for header name, got a int" in failure_result.msg
+
+- name: Test mismatched sizes of column_order and header_names (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(column_order=['name', 'age', 'role'], header_names=['User Name', 'User Age']) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for mismatched sizes
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "'column_order' and 'header_names' must have the same number of elements" in failure_result.msg
+
+- name: Test column_order with more elements than available fields (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(column_order=['name', 'age', 'role', 'extra_field', 'another_extra']) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for column_order with too many elements
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "'column_order' has more elements" in failure_result.msg
+
+- name: Test header_names with more elements than available fields (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(header_names=['User Name', 'User Age', 'User Role', 'Extra Field', 'Another Extra']) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for header_names with too many elements
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "'header_names' has more elements" in failure_result.msg
+
+- name: Test column_alignments with more elements than available fields (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(column_alignments={'name': 'center', 'age': 'right', 'role': 'left', 'extra': 'center', 'another': 'left'}) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for column_alignments with too many elements
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "'column_alignments' has more elements" in failure_result.msg
+
+- name: Test non-dictionary column_alignments (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(column_alignments='invalid') }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for non-dictionary column_alignments
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a dictionary for column_alignments, got a string" in failure_result.msg
+
+- name: Test non-string keys in column_alignments (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(column_alignments={123: 'center'}) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for non-string keys in column_alignments
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a string for column_alignments key, got a int" in failure_result.msg
+
+- name: Test non-string values in column_alignments (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(column_alignments={'name': 123}) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for non-string values in column_alignments
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Expected a string for column_alignments value, got a int" in failure_result.msg
+
+- name: Test invalid alignment value in column_alignments (expect failure)
+ block:
+ - set_fact:
+ invalid_table: "{{ test_data | community.general.to_prettytable(column_alignments={'name': 'invalid'}) }}"
+ register: failure_result
+ ignore_errors: true
+ - name: Verify error message for invalid alignment value in column_alignments
+ assert:
+ that:
+ - failure_result is failed
+ - >
+ "Invalid alignment 'invalid' in 'column_alignments'" in failure_result.msg
+ - >
+ "Valid alignments are" in failure_result.msg
+
+# Test using explicit python script to create dictionary with mixed key types
+- name: Create test data with numeric keys
+ set_fact:
+ mixed_key_data:
+ - name: Alice
+ role: admin
+ 1: ID001
+ - name: Bob
+ role: user
+ 1: ID002
+
+- name: Test prettytable with mixed key types
+ set_fact:
+ mixed_key_table: "{{ mixed_key_data | community.general.to_prettytable }}"
+ expected_mixed_key_table: |-
+ +-------+-------+-------+
+ | name | role | 1 |
+ +-------+-------+-------+
+ | Alice | admin | ID001 |
+ | Bob | user | ID002 |
+ +-------+-------+-------+
+
+- name: Verify mixed key types were handled correctly
+ assert:
+ that:
+ - mixed_key_table == expected_mixed_key_table
+
+# Test column ordering with numeric keys
+- name: Test column ordering with numeric keys
+ set_fact:
+ mixed_ordered_table: "{{ mixed_key_data | community.general.to_prettytable(column_order=['1', 'name', 'role']) }}"
+ expected_ordered_numeric_table: |-
+ +-------+-------+-------+
+ | 1 | name | role |
+ +-------+-------+-------+
+ | ID001 | Alice | admin |
+ | ID002 | Bob | user |
+ +-------+-------+-------+
+
+- name: Verify column ordering with numeric keys
+ assert:
+ that:
+ - mixed_ordered_table == expected_ordered_numeric_table
+
+# Test custom headers with numeric keys
+- name: Test custom headers with numeric keys
+ set_fact:
+ mixed_headers_table: "{{ mixed_key_data | community.general.to_prettytable(header_names=['Name', 'Role', 'ID']) }}"
+ expected_headers_numeric_table: |-
+ +-------+-------+-------+
+ | Name | Role | ID |
+ +-------+-------+-------+
+ | Alice | admin | ID001 |
+ | Bob | user | ID002 |
+ +-------+-------+-------+
+
+- name: Verify custom headers with numeric keys
+ assert:
+ that:
+ - mixed_headers_table == expected_headers_numeric_table
+
+# Test column alignments with numeric keys
+- name: Test column alignments with numeric keys
+ set_fact:
+ mixed_aligned_table: "{{ mixed_key_data | community.general.to_prettytable(column_alignments={'1': 'right', 'name': 'left', 'role': 'center'}) }}"
+ expected_aligned_numeric_table: |-
+ +-------+-------+-------+
+ | name | role | 1 |
+ +-------+-------+-------+
+ | Alice | admin | ID001 |
+ | Bob | user | ID002 |
+ +-------+-------+-------+
+
+- name: Verify column alignments with numeric keys
+ assert:
+ that:
+ - mixed_aligned_table == expected_aligned_numeric_table
+
+# Test with boolean-like string keys
+- name: Create test data with boolean-like string keys
+ set_fact:
+ boolean_data:
+ - name: Alice
+ role: admin
+ true: 'Yes'
+ false: 'No'
+ - name: Bob
+ role: user
+ true: 'No'
+ false: 'Yes'
+
+- name: Test prettytable with boolean-like string keys
+ set_fact:
+ bool_table: "{{ boolean_data | community.general.to_prettytable }}"
+ expected_bool_table: |-
+ +-------+-------+------+-------+
+ | name | role | True | False |
+ +-------+-------+------+-------+
+ | Alice | admin | Yes | No |
+ | Bob | user | No | Yes |
+ +-------+-------+------+-------+
+
+- name: Verify boolean-like keys were handled correctly
+ assert:
+ that:
+ - bool_table == expected_bool_table
+
+# Test that column_order with capitalized boolean names works via case-insensitive matching
+- name: Test column ordering with capitalized boolean names
+ set_fact:
+ bool_ordered_table: "{{ boolean_data | community.general.to_prettytable(column_order=['True', 'False', 'name', 'role']) }}"
+ expected_bool_ordered_table: |-
+ +------+-------+-------+-------+
+ | True | False | name | role |
+ +------+-------+-------+-------+
+ | Yes | No | Alice | admin |
+ | No | Yes | Bob | user |
+ +------+-------+-------+-------+
+
+- name: Verify that 'True' in column_order works with 'true' keys
+ assert:
+ that:
+ - bool_ordered_table == expected_bool_ordered_table
+
+# Test column alignments with boolean-like string keys
+- name: Test column alignments with boolean-like string keys
+ set_fact:
+ bool_aligned_table: "{{ boolean_data | community.general.to_prettytable(column_alignments={'true': 'right', 'false': 'center', 'name': 'left'}) }}"
+ expected_bool_aligned_table: |-
+ +-------+-------+------+-------+
+ | name | role | True | False |
+ +-------+-------+------+-------+
+ | Alice | admin | Yes | No |
+ | Bob | user | No | Yes |
+ +-------+-------+------+-------+
+
+- name: Verify column alignments with boolean-like string keys
+ assert:
+ that:
+ - bool_aligned_table == expected_bool_aligned_table
diff --git a/tests/integration/targets/filter_version_sort/tasks/main.yml b/tests/integration/targets/filter_version_sort/tasks/main.yml
index 08985d1bae..e7a7e3757c 100644
--- a/tests/integration/targets/filter_version_sort/tasks/main.yml
+++ b/tests/integration/targets/filter_version_sort/tasks/main.yml
@@ -8,7 +8,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-- name: validate that versions are properly sorted in a stable way
+- name: validate that versions are properly sorted in a stable way
assert:
that:
- "['a-1.9.rpm', 'a-1.10-1.rpm', 'a-1.09.rpm', 'b-1.01.rpm', 'a-2.1-0.rpm', 'a-1.10-0.rpm'] | community.general.version_sort == ['a-1.9.rpm', 'a-1.09.rpm', 'a-1.10-0.rpm', 'a-1.10-1.rpm', 'a-2.1-0.rpm', 'b-1.01.rpm']"
diff --git a/tests/integration/targets/flatpak/tasks/main.yml b/tests/integration/targets/flatpak/tasks/main.yml
index deaf354e8a..e05e2a168e 100644
--- a/tests/integration/targets/flatpak/tasks/main.yml
+++ b/tests/integration/targets/flatpak/tasks/main.yml
@@ -11,53 +11,53 @@
- block:
- - import_tasks: setup.yml
- become: true
+ - import_tasks: setup.yml
+ become: true
- # executable override
+ # executable override
- - name: Test executable override
- flatpak:
- name: com.dummy.App1
- remote: dummy-remote
- state: present
- executable: nothing-that-exists
- ignore_errors: true
- register: executable_override_result
+ - name: Test executable override
+ flatpak:
+ name: com.dummy.App1
+ remote: dummy-remote
+ state: present
+ executable: nothing-that-exists
+ ignore_errors: true
+ register: executable_override_result
- - name: Verify executable override test result
- assert:
- that:
- - executable_override_result is failed
- - executable_override_result is not changed
- msg: "Specifying non-existing executable shall fail module execution"
+ - name: Verify executable override test result
+ assert:
+ that:
+ - executable_override_result is failed
+ - executable_override_result is not changed
+ msg: "Specifying non-existing executable shall fail module execution"
- - import_tasks: check_mode.yml
- become: false
+ - import_tasks: check_mode.yml
+ become: false
- - import_tasks: test.yml
- become: false
- vars:
- method: user
+ - import_tasks: test.yml
+ become: false
+ vars:
+ method: user
- - import_tasks: test.yml
- become: true
- vars:
- method: system
+ - import_tasks: test.yml
+ become: true
+ vars:
+ method: system
always:
- - name: Check HTTP server status
- async_status:
- jid: "{{ webserver_status.ansible_job_id }}"
- ignore_errors: true
+ - name: Check HTTP server status
+ async_status:
+ jid: "{{ webserver_status.ansible_job_id }}"
+ ignore_errors: true
- - name: List processes
- command: ps aux
+ - name: List processes
+ command: ps aux
- - name: Stop HTTP server
- command: >-
- pkill -f -- '{{ remote_tmp_dir }}/serve.py'
+ - name: Stop HTTP server
+ command: >-
+ pkill -f -- '{{ remote_tmp_dir }}/serve.py'
when: |
ansible_distribution == 'Fedora' or
diff --git a/tests/integration/targets/flatpak/tasks/setup.yml b/tests/integration/targets/flatpak/tasks/setup.yml
index 4dfdd68cb9..041c736624 100644
--- a/tests/integration/targets/flatpak/tasks/setup.yml
+++ b/tests/integration/targets/flatpak/tasks/setup.yml
@@ -11,17 +11,17 @@
when: ansible_distribution == 'Fedora'
- block:
- - name: Activate flatpak ppa on Ubuntu
- apt_repository:
- repo: ppa:alexlarsson/flatpak
- state: present
- mode: '0644'
- when: ansible_lsb.major_release | int < 18
+ - name: Activate flatpak ppa on Ubuntu
+ apt_repository:
+ repo: ppa:alexlarsson/flatpak
+ state: present
+ mode: '0644'
+ when: ansible_lsb.major_release | int < 18
- - name: Install flatpak package on Ubuntu
- apt:
- name: flatpak
- state: present
+ - name: Install flatpak package on Ubuntu
+ apt:
+ name: flatpak
+ state: present
when: ansible_distribution == 'Ubuntu'
diff --git a/tests/integration/targets/flatpak/tasks/test.yml b/tests/integration/targets/flatpak/tasks/test.yml
index 658f7b1168..1c580b6fbf 100644
--- a/tests/integration/targets/flatpak/tasks/test.yml
+++ b/tests/integration/targets/flatpak/tasks/test.yml
@@ -164,25 +164,25 @@
- when: url_removal_result is not failed
block:
- - name: Verify removal test result - {{ method }}
- assert:
- that:
- - url_removal_result is changed
- msg: "state=absent with url as name shall remove flatpak when present"
+ - name: Verify removal test result - {{ method }}
+ assert:
+ that:
+ - url_removal_result is changed
+ msg: "state=absent with url as name shall remove flatpak when present"
- - name: Test idempotency of removal with url - {{ method }}
- flatpak:
- name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref
- state: absent
- method: "{{ method }}"
- no_dependencies: true
- register: double_url_removal_result
+ - name: Test idempotency of removal with url - {{ method }}
+ flatpak:
+ name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref
+ state: absent
+ method: "{{ method }}"
+ no_dependencies: true
+ register: double_url_removal_result
- - name: Verify idempotency of removal with url test result - {{ method }}
- assert:
- that:
- - double_url_removal_result is not changed
- msg: "state=absent with url as name shall not do anything when flatpak is not present"
+ - name: Verify idempotency of removal with url test result - {{ method }}
+ assert:
+ that:
+ - double_url_removal_result is not changed
+ msg: "state=absent with url as name shall not do anything when flatpak is not present"
- name: Make sure flatpak is really gone - {{ method }}
flatpak:
diff --git a/tests/integration/targets/flatpak_remote/tasks/main.yml b/tests/integration/targets/flatpak_remote/tasks/main.yml
index 1c50912328..951ab5aefb 100644
--- a/tests/integration/targets/flatpak_remote/tasks/main.yml
+++ b/tests/integration/targets/flatpak_remote/tasks/main.yml
@@ -11,39 +11,39 @@
- block:
- - import_tasks: setup.yml
- become: true
+ - import_tasks: setup.yml
+ become: true
- # executable override
+ # executable override
- - name: Test executable override
- flatpak_remote:
- name: irrelevant
- remote: irrelevant
- state: present
- executable: nothing-that-exists
- ignore_errors: true
- register: executable_override_result
+ - name: Test executable override
+ flatpak_remote:
+ name: irrelevant
+ remote: irrelevant
+ state: present
+ executable: nothing-that-exists
+ ignore_errors: true
+ register: executable_override_result
- - name: Verify executable override test result
- assert:
- that:
- - executable_override_result is failed
- - executable_override_result is not changed
- msg: "Specifying non-existing executable shall fail module execution"
+ - name: Verify executable override test result
+ assert:
+ that:
+ - executable_override_result is failed
+ - executable_override_result is not changed
+ msg: "Specifying non-existing executable shall fail module execution"
- - import_tasks: check_mode.yml
- become: false
+ - import_tasks: check_mode.yml
+ become: false
- - import_tasks: test.yml
- become: false
- vars:
- method: user
+ - import_tasks: test.yml
+ become: false
+ vars:
+ method: user
- - import_tasks: test.yml
- become: true
- vars:
- method: system
+ - import_tasks: test.yml
+ become: true
+ vars:
+ method: system
when: |
ansible_distribution == 'Fedora' or
diff --git a/tests/integration/targets/flatpak_remote/tasks/setup.yml b/tests/integration/targets/flatpak_remote/tasks/setup.yml
index 55a14c9724..9fbf4cbe15 100644
--- a/tests/integration/targets/flatpak_remote/tasks/setup.yml
+++ b/tests/integration/targets/flatpak_remote/tasks/setup.yml
@@ -9,16 +9,16 @@
state: present
when: ansible_distribution == 'Fedora'
- block:
- - name: Activate flatpak ppa on Ubuntu versions older than 18.04/bionic
- apt_repository:
- repo: ppa:alexlarsson/flatpak
- state: present
- mode: '0644'
- when: ansible_lsb.major_release | int < 18
- - name: Install flatpak package on Ubuntu
- apt:
- name: flatpak
- state: present
+ - name: Activate flatpak ppa on Ubuntu versions older than 18.04/bionic
+ apt_repository:
+ repo: ppa:alexlarsson/flatpak
+ state: present
+ mode: '0644'
+ when: ansible_lsb.major_release | int < 18
+ - name: Install flatpak package on Ubuntu
+ apt:
+ name: flatpak
+ state: present
when: ansible_distribution == 'Ubuntu'
- name: Install flatpak remote for testing check mode
flatpak_remote:
diff --git a/tests/integration/targets/gandi_livedns/defaults/main.yml b/tests/integration/targets/gandi_livedns/defaults/main.yml
index ec1808d8b5..7acd5c0cab 100644
--- a/tests/integration/targets/gandi_livedns/defaults/main.yml
+++ b/tests/integration/targets/gandi_livedns/defaults/main.yml
@@ -6,32 +6,32 @@
gandi_livedns_domain_name: "ansible-tests.org"
gandi_livedns_record_items:
-# Single A record
-- record: test-www
- type: A
- values:
+ # Single A record
+ - record: test-www
+ type: A
+ values:
- 10.10.10.10
- ttl: 400
- update_values:
+ ttl: 400
+ update_values:
- 10.10.10.11
- update_ttl: 800
+ update_ttl: 800
-# Multiple A records
-- record: test-www-multiple
- type: A
- ttl: 3600
- values:
+ # Multiple A records
+ - record: test-www-multiple
+ type: A
+ ttl: 3600
+ values:
- 10.10.11.10
- 10.10.11.10
- update_values:
+ update_values:
- 10.10.11.11
- 10.10.11.13
-# CNAME
-- record: test-cname
- type: CNAME
- ttl: 10800
- values:
+ # CNAME
+ - record: test-cname
+ type: CNAME
+ ttl: 10800
+ values:
- test-www2
- update_values:
+ update_values:
- test-www
diff --git a/tests/integration/targets/gandi_livedns/tasks/create_record.yml b/tests/integration/targets/gandi_livedns/tasks/create_record.yml
index 87056aa865..708fa02715 100644
--- a/tests/integration/targets/gandi_livedns/tasks/create_record.yml
+++ b/tests/integration/targets/gandi_livedns/tasks/create_record.yml
@@ -15,7 +15,7 @@
- name: verify test absent dns record
assert:
that:
- - result is successful
+ - result is successful
- name: test create a dns record in check mode
community.general.gandi_livedns:
@@ -30,7 +30,7 @@
- name: verify test create a dns record in check mode
assert:
that:
- - result is changed
+ - result is changed
- name: test create a dns record
community.general.gandi_livedns:
@@ -44,11 +44,11 @@
- name: verify test create a dns record
assert:
that:
- - result is changed
- - result.record['values'] == item['values']
- - result.record.record == item.record
- - result.record.type == item.type
- - result.record.ttl == item.ttl
+ - result is changed
+ - result.record['values'] == item['values']
+ - result.record.record == item.record
+ - result.record.type == item.type
+ - result.record.ttl == item.ttl
- name: test create a dns record idempotence
community.general.gandi_livedns:
@@ -62,11 +62,11 @@
- name: verify test create a dns record idempotence
assert:
that:
- - result is not changed
- - result.record['values'] == item['values']
- - result.record.record == item.record
- - result.record.type == item.type
- - result.record.ttl == item.ttl
+ - result is not changed
+ - result.record['values'] == item['values']
+ - result.record.record == item.record
+ - result.record.type == item.type
+ - result.record.ttl == item.ttl
- name: test create a DNS record with personal access token
community.general.gandi_livedns:
diff --git a/tests/integration/targets/gandi_livedns/tasks/remove_record.yml b/tests/integration/targets/gandi_livedns/tasks/remove_record.yml
index c4b937fd5a..1e46ac32f8 100644
--- a/tests/integration/targets/gandi_livedns/tasks/remove_record.yml
+++ b/tests/integration/targets/gandi_livedns/tasks/remove_record.yml
@@ -16,7 +16,7 @@
- name: verify test remove a dns record in check mode
assert:
that:
- - result is changed
+ - result is changed
- name: test remove a dns record
community.general.gandi_livedns:
@@ -30,7 +30,7 @@
- name: verify test remove a dns record
assert:
that:
- - result is changed
+ - result is changed
- name: test remove a dns record idempotence
community.general.gandi_livedns:
@@ -44,7 +44,7 @@
- name: verify test remove a dns record idempotence
assert:
that:
- - result is not changed
+ - result is not changed
- name: test remove second dns record idempotence
community.general.gandi_livedns:
@@ -58,4 +58,4 @@
- name: verify test remove a dns record idempotence
assert:
that:
- - result is not changed
+ - result is not changed
diff --git a/tests/integration/targets/gandi_livedns/tasks/update_record.yml b/tests/integration/targets/gandi_livedns/tasks/update_record.yml
index 5f19bfa244..1bcd82fb3f 100644
--- a/tests/integration/targets/gandi_livedns/tasks/update_record.yml
+++ b/tests/integration/targets/gandi_livedns/tasks/update_record.yml
@@ -16,11 +16,11 @@
- name: verify test update in check mode
assert:
that:
- - result is changed
- - result.record['values'] == (item.update_values | default(item['values']))
- - result.record.record == item.record
- - result.record.type == item.type
- - result.record.ttl == (item.update_ttl | default(item.ttl))
+ - result is changed
+ - result.record['values'] == (item.update_values | default(item['values']))
+ - result.record.record == item.record
+ - result.record.type == item.type
+ - result.record.ttl == (item.update_ttl | default(item.ttl))
- name: test update or add another dns record
community.general.gandi_livedns:
@@ -34,11 +34,11 @@
- name: verify test update a dns record
assert:
that:
- - result is changed
- - result.record['values'] == (item.update_values | default(item['values']))
- - result.record.record == item.record
- - result.record.ttl == (item.update_ttl | default(item.ttl))
- - result.record.type == item.type
+ - result is changed
+ - result.record['values'] == (item.update_values | default(item['values']))
+ - result.record.record == item.record
+ - result.record.ttl == (item.update_ttl | default(item.ttl))
+ - result.record.type == item.type
- name: test update or add another dns record idempotence
community.general.gandi_livedns:
@@ -52,8 +52,8 @@
- name: verify test update a dns record idempotence
assert:
that:
- - result is not changed
- - result.record['values'] == (item.update_values | default(item['values']))
- - result.record.record == item.record
- - result.record.ttl == (item.update_ttl | default(item.ttl))
- - result.record.type == item.type
+ - result is not changed
+ - result.record['values'] == (item.update_values | default(item['values']))
+ - result.record.record == item.record
+ - result.record.ttl == (item.update_ttl | default(item.ttl))
+ - result.record.type == item.type
diff --git a/tests/integration/targets/gem/tasks/main.yml b/tests/integration/targets/gem/tasks/main.yml
index 2848a92bfb..0c85e56489 100644
--- a/tests/integration/targets/gem/tasks/main.yml
+++ b/tests/integration/targets/gem/tasks/main.yml
@@ -13,202 +13,202 @@
- not (ansible_os_family == 'Alpine') # TODO
block:
- - include_vars: '{{ item }}'
- with_first_found:
- - files:
- - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
- - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
- - '{{ ansible_distribution }}.yml'
- - '{{ ansible_os_family }}.yml'
- - 'default.yml'
- paths: '../vars'
+ - include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
+ - '{{ ansible_distribution }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - 'default.yml'
+ paths: '../vars'
- - name: Install dependencies for test
- package:
- name: "{{ item }}"
- state: present
- loop: "{{ test_packages }}"
- when: ansible_distribution != "MacOSX"
+ - name: Install dependencies for test
+ package:
+ name: "{{ item }}"
+ state: present
+ loop: "{{ test_packages }}"
+ when: ansible_distribution != "MacOSX"
- - name: Install a gem
- gem:
- name: gist
- state: present
- register: install_gem_result
- ignore_errors: true
-
- # when running as root on Fedora, '--install-dir' is set in the os defaults which is
- # incompatible with '--user-install', we ignore this error for this case only
- - name: fail if failed to install gem
- fail:
- msg: "failed to install gem: {{ install_gem_result.msg }}"
- when:
- - install_gem_result is failed
- - not (ansible_user_uid == 0 and "User --install-dir or --user-install but not both" not in install_gem_result.msg)
-
- - block:
- - name: List gems
- command: gem list
- register: current_gems
-
- - name: Ensure gem was installed
- assert:
- that:
- - install_gem_result is changed
- - current_gems.stdout is search('gist\s+\([0-9.]+\)')
-
- - name: Remove a gem
+ - name: Install a gem
gem:
name: gist
- state: absent
- register: remove_gem_results
+ state: present
+ register: install_gem_result
+ ignore_errors: true
- - name: List gems
- command: gem list
- register: current_gems
+ # when running as root on Fedora, '--install-dir' is set in the os defaults which is
+ # incompatible with '--user-install', we ignore this error for this case only
+ - name: fail if failed to install gem
+ fail:
+ msg: "failed to install gem: {{ install_gem_result.msg }}"
+ when:
+ - install_gem_result is failed
+ - not (ansible_user_uid == 0 and "User --install-dir or --user-install but not both" not in install_gem_result.msg)
- - name: Verify gem is not installed
+ - block:
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Ensure gem was installed
+ assert:
+ that:
+ - install_gem_result is changed
+ - current_gems.stdout is search('gist\s+\([0-9.]+\)')
+
+ - name: Remove a gem
+ gem:
+ name: gist
+ state: absent
+ register: remove_gem_results
+
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Verify gem is not installed
+ assert:
+ that:
+ - remove_gem_results is changed
+ - current_gems.stdout is not search('gist\s+\([0-9.]+\)')
+ when: not install_gem_result is failed
+
+ # install gem in --no-user-install
+ - block:
+ - name: Install a gem with --no-user-install
+ gem:
+ name: gist
+ state: present
+ user_install: false
+ register: install_gem_result
+
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Ensure gem was installed
+ assert:
+ that:
+ - install_gem_result is changed
+ - current_gems.stdout is search('gist\s+\([0-9.]+\)')
+
+ - name: Remove a gem
+ gem:
+ name: gist
+ state: absent
+ register: remove_gem_results
+
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Verify gem is not installed
+ assert:
+ that:
+ - remove_gem_results is changed
+ - current_gems.stdout is not search('gist\s+\([0-9.]+\)')
+ when: ansible_user_uid == 0
+
+ # Check custom gem directory
+ - name: Install gem in a custom directory with incorrect options
+ gem:
+ name: gist
+ state: present
+ install_dir: "{{ remote_tmp_dir }}/gems"
+ ignore_errors: true
+ register: install_gem_fail_result
+
+ - debug:
+ var: install_gem_fail_result
+ tags: debug
+
+ - name: Ensure previous task failed
assert:
that:
- - remove_gem_results is changed
- - current_gems.stdout is not search('gist\s+\([0-9.]+\)')
- when: not install_gem_result is failed
+ - install_gem_fail_result is failed
+ - install_gem_fail_result.msg == 'install_dir requires user_install=false'
- # install gem in --no-user-install
- - block:
- - name: Install a gem with --no-user-install
+ - name: Install a gem in a custom directory
gem:
name: gist
state: present
user_install: false
+ install_dir: "{{ remote_tmp_dir }}/gems"
register: install_gem_result
- - name: List gems
- command: gem list
- register: current_gems
+ - name: Find gems in custom directory
+ find:
+ paths: "{{ remote_tmp_dir }}/gems/gems"
+ file_type: directory
+ contains: gist
+ register: gem_search
- - name: Ensure gem was installed
+ - name: Ensure gem was installed in custom directory
assert:
that:
- install_gem_result is changed
- - current_gems.stdout is search('gist\s+\([0-9.]+\)')
+ - gem_search.files[0].path is search('gist-[0-9.]+')
+ ignore_errors: true
- - name: Remove a gem
+ - name: Remove a gem in a custom directory
gem:
name: gist
state: absent
- register: remove_gem_results
+ user_install: false
+ install_dir: "{{ remote_tmp_dir }}/gems"
+ register: install_gem_result
- - name: List gems
- command: gem list
- register: current_gems
+ - name: Find gems in custom directory
+ find:
+ paths: "{{ remote_tmp_dir }}/gems/gems"
+ file_type: directory
+ contains: gist
+ register: gem_search
- - name: Verify gem is not installed
+ - name: Ensure gem was removed in custom directory
assert:
that:
- - remove_gem_results is changed
- - current_gems.stdout is not search('gist\s+\([0-9.]+\)')
- when: ansible_user_uid == 0
+ - install_gem_result is changed
+ - gem_search.files | length == 0
- # Check custom gem directory
- - name: Install gem in a custom directory with incorrect options
- gem:
- name: gist
- state: present
- install_dir: "{{ remote_tmp_dir }}/gems"
- ignore_errors: true
- register: install_gem_fail_result
+ # Custom directory for executables (--bindir)
+ - name: Install gem with custom bindir
+ gem:
+ name: gist
+ state: present
+ bindir: "{{ remote_tmp_dir }}/custom_bindir"
+ norc: true
+ user_install: false # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL
+ register: install_gem_result
- - debug:
- var: install_gem_fail_result
- tags: debug
+ - name: Get stats of gem executable
+ stat:
+ path: "{{ remote_tmp_dir }}/custom_bindir/gist"
+ register: gem_bindir_stat
- - name: Ensure previous task failed
- assert:
- that:
- - install_gem_fail_result is failed
- - install_gem_fail_result.msg == 'install_dir requires user_install=false'
+ - name: Ensure gem executable was installed in custom directory
+ assert:
+ that:
+ - install_gem_result is changed
+ - gem_bindir_stat.stat.exists and gem_bindir_stat.stat.isreg
- - name: Install a gem in a custom directory
- gem:
- name: gist
- state: present
- user_install: false
- install_dir: "{{ remote_tmp_dir }}/gems"
- register: install_gem_result
+ - name: Remove gem with custom bindir
+ gem:
+ name: gist
+ state: absent
+ bindir: "{{ remote_tmp_dir }}/custom_bindir"
+ norc: true
+ user_install: false # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL
+ register: install_gem_result
- - name: Find gems in custom directory
- find:
- paths: "{{ remote_tmp_dir }}/gems/gems"
- file_type: directory
- contains: gist
- register: gem_search
+ - name: Get stats of gem executable
+ stat:
+ path: "{{ remote_tmp_dir }}/custom_bindir/gist"
+ register: gem_bindir_stat
- - name: Ensure gem was installed in custom directory
- assert:
- that:
- - install_gem_result is changed
- - gem_search.files[0].path is search('gist-[0-9.]+')
- ignore_errors: true
-
- - name: Remove a gem in a custom directory
- gem:
- name: gist
- state: absent
- user_install: false
- install_dir: "{{ remote_tmp_dir }}/gems"
- register: install_gem_result
-
- - name: Find gems in custom directory
- find:
- paths: "{{ remote_tmp_dir }}/gems/gems"
- file_type: directory
- contains: gist
- register: gem_search
-
- - name: Ensure gem was removed in custom directory
- assert:
- that:
- - install_gem_result is changed
- - gem_search.files | length == 0
-
- # Custom directory for executables (--bindir)
- - name: Install gem with custom bindir
- gem:
- name: gist
- state: present
- bindir: "{{ remote_tmp_dir }}/custom_bindir"
- norc: true
- user_install: false # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL
- register: install_gem_result
-
- - name: Get stats of gem executable
- stat:
- path: "{{ remote_tmp_dir }}/custom_bindir/gist"
- register: gem_bindir_stat
-
- - name: Ensure gem executable was installed in custom directory
- assert:
- that:
- - install_gem_result is changed
- - gem_bindir_stat.stat.exists and gem_bindir_stat.stat.isreg
-
- - name: Remove gem with custom bindir
- gem:
- name: gist
- state: absent
- bindir: "{{ remote_tmp_dir }}/custom_bindir"
- norc: true
- user_install: false # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL
- register: install_gem_result
-
- - name: Get stats of gem executable
- stat:
- path: "{{ remote_tmp_dir }}/custom_bindir/gist"
- register: gem_bindir_stat
-
- - name: Ensure gem executable was removed from custom directory
- assert:
- that:
- - install_gem_result is changed
- - not gem_bindir_stat.stat.exists
+ - name: Ensure gem executable was removed from custom directory
+ assert:
+ that:
+ - install_gem_result is changed
+ - not gem_bindir_stat.stat.exists
diff --git a/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml b/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml
deleted file mode 100644
index e294a83fb5..0000000000
--- a/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- import_tasks: setup_no_value.yml
-
-- name: testing exclusion between state and list_all parameters
- git_config:
- list_all: true
- state: absent
- register: result
- ignore_errors: true
-
-- name: assert git_config failed
- assert:
- that:
- - result is failed
- - "result.msg == 'parameters are mutually exclusive: list_all|state'"
-...
diff --git a/tests/integration/targets/git_config/tasks/get_set_no_state.yml b/tests/integration/targets/git_config/tasks/get_set_no_state.yml
index 4e41bf4e9d..6963e679d4 100644
--- a/tests/integration/targets/git_config/tasks/get_set_no_state.yml
+++ b/tests/integration/targets/git_config/tasks/get_set_no_state.yml
@@ -13,7 +13,7 @@
register: set_result
- name: getting value without state
- git_config:
+ git_config_info:
name: "{{ option_name }}"
scope: "{{ option_scope }}"
register: get_result
@@ -24,6 +24,5 @@
- set_result is changed
- set_result.diff.before == "\n"
- set_result.diff.after == option_value + "\n"
- - get_result is not changed
- get_result.config_value == option_value
...
diff --git a/tests/integration/targets/git_config/tasks/get_set_state_present.yml b/tests/integration/targets/git_config/tasks/get_set_state_present.yml
index cfc3bbe78d..28f031aeb1 100644
--- a/tests/integration/targets/git_config/tasks/get_set_state_present.yml
+++ b/tests/integration/targets/git_config/tasks/get_set_state_present.yml
@@ -14,10 +14,9 @@
register: result
- name: getting value with state=present
- git_config:
+ git_config_info:
name: "{{ option_name }}"
scope: "{{ option_scope }}"
- state: present
register: get_result
- name: assert set changed and value is correct with state=present
@@ -26,6 +25,5 @@
- set_result is changed
- set_result.diff.before == "\n"
- set_result.diff.after == option_value + "\n"
- - get_result is not changed
- get_result.config_value == option_value
...
diff --git a/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml b/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml
index c410bfe189..f36b3f3aed 100644
--- a/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml
+++ b/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml
@@ -15,11 +15,10 @@
register: result
- name: getting value with state=present
- git_config:
+ git_config_info:
name: "{{ option_name }}"
scope: "file"
- file: "{{ remote_tmp_dir }}/gitconfig_file"
- state: present
+ path: "{{ remote_tmp_dir }}/gitconfig_file"
register: get_result
- name: assert set changed and value is correct with state=present
@@ -28,6 +27,5 @@
- set_result is changed
- set_result.diff.before == "\n"
- set_result.diff.after == option_value + "\n"
- - get_result is not changed
- get_result.config_value == option_value
...
\ No newline at end of file
diff --git a/tests/integration/targets/git_config/tasks/main.yml b/tests/integration/targets/git_config/tasks/main.yml
index 5fddaf7649..48e411cc22 100644
--- a/tests/integration/targets/git_config/tasks/main.yml
+++ b/tests/integration/targets/git_config/tasks/main.yml
@@ -14,8 +14,6 @@
- block:
- import_tasks: set_value.yml
- # testing parameters exclusion: state and list_all
- - import_tasks: exclusion_state_list-all.yml
# testing get/set option without state
- import_tasks: get_set_no_state.yml
# testing get/set option with state=present
diff --git a/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml b/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml
index a76fbab9cd..ebcd2e8b89 100644
--- a/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml
+++ b/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml
@@ -14,7 +14,7 @@
register: unset_result
- name: getting value
- git_config:
+ git_config_info:
name: "{{ option_name }}"
scope: "{{ option_scope }}"
register: get_result
diff --git a/tests/integration/targets/git_config/tasks/set_multi_value.yml b/tests/integration/targets/git_config/tasks/set_multi_value.yml
index 8d2710b761..94edf94df4 100644
--- a/tests/integration/targets/git_config/tasks/set_multi_value.yml
+++ b/tests/integration/targets/git_config/tasks/set_multi_value.yml
@@ -31,17 +31,11 @@
- 'merge_request.target=foobar'
register: set_result2
-- name: getting the multi-value
- git_config:
- name: push.pushoption
- scope: global
- register: get_single_result
-
- name: getting all values for the single option
git_config_info:
name: push.pushoption
scope: global
- register: get_all_result
+ register: get_result
- name: replace-all values
git_config:
@@ -62,8 +56,8 @@
- set_result2.results[1] is not changed
- set_result2.results[2] is not changed
- set_result3 is changed
- - get_single_result.config_value == 'merge_request.create'
- - 'get_all_result.config_values == {"push.pushoption": ["merge_request.create", "merge_request.draft", "merge_request.target=foobar"]}'
+ - get_result.config_value == 'merge_request.create'
+ - 'get_result.config_values == {"push.pushoption": ["merge_request.create", "merge_request.draft", "merge_request.target=foobar"]}'
- name: assert the diffs are also right
assert:
diff --git a/tests/integration/targets/git_config/tasks/set_value.yml b/tests/integration/targets/git_config/tasks/set_value.yml
index 774e3136a5..54505438cb 100644
--- a/tests/integration/targets/git_config/tasks/set_value.yml
+++ b/tests/integration/targets/git_config/tasks/set_value.yml
@@ -20,7 +20,7 @@
register: set_result2
- name: getting value
- git_config:
+ git_config_info:
name: core.name
scope: global
register: get_result
@@ -30,7 +30,6 @@
that:
- set_result1 is changed
- set_result2 is changed
- - get_result is not changed
- get_result.config_value == 'bar'
- set_result1.diff.before == "\n"
- set_result1.diff.after == "foo\n"
diff --git a/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml b/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml
index 3ca9023aad..e4b1195194 100644
--- a/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml
+++ b/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml
@@ -3,7 +3,7 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-#- import_tasks: setup_no_value.yml
+# - import_tasks: setup_no_value.yml
- name: setting value
git_config:
@@ -22,7 +22,7 @@
register: set_result2
- name: getting value
- git_config:
+ git_config_info:
name: core.hooksPath
scope: global
register: get_result
@@ -32,6 +32,5 @@
that:
- set_result1 is changed
- set_result2 is not changed
- - get_result is not changed
- get_result.config_value == '~/foo/bar'
...
diff --git a/tests/integration/targets/git_config/tasks/unset_check_mode.yml b/tests/integration/targets/git_config/tasks/unset_check_mode.yml
index 39bce33790..dc73b07a52 100644
--- a/tests/integration/targets/git_config/tasks/unset_check_mode.yml
+++ b/tests/integration/targets/git_config/tasks/unset_check_mode.yml
@@ -14,7 +14,7 @@
register: unset_result
- name: getting value
- git_config:
+ git_config_info:
name: "{{ option_name }}"
scope: "{{ option_scope }}"
register: get_result
diff --git a/tests/integration/targets/git_config/tasks/unset_no_value.yml b/tests/integration/targets/git_config/tasks/unset_no_value.yml
index 394276cad7..7c10a474d9 100644
--- a/tests/integration/targets/git_config/tasks/unset_no_value.yml
+++ b/tests/integration/targets/git_config/tasks/unset_no_value.yml
@@ -13,7 +13,7 @@
register: unset_result
- name: getting value
- git_config:
+ git_config_info:
name: "{{ option_name }}"
scope: "{{ option_scope }}"
register: get_result
diff --git a/tests/integration/targets/git_config/tasks/unset_value.yml b/tests/integration/targets/git_config/tasks/unset_value.yml
index 5f8c52c96f..dce0818b96 100644
--- a/tests/integration/targets/git_config/tasks/unset_value.yml
+++ b/tests/integration/targets/git_config/tasks/unset_value.yml
@@ -13,7 +13,7 @@
register: unset_result
- name: getting value
- git_config:
+ git_config_info:
name: "{{ option_name }}"
scope: "{{ option_scope }}"
register: get_result
@@ -37,7 +37,7 @@
register: unset_result
- name: getting value
- git_config:
+ git_config_info:
name: "{{ option_name }}"
scope: "{{ option_scope }}"
register: get_result
diff --git a/tests/integration/targets/git_config_info/tasks/main.yml b/tests/integration/targets/git_config_info/tasks/main.yml
index 993238805e..20042e7427 100644
--- a/tests/integration/targets/git_config_info/tasks/main.yml
+++ b/tests/integration/targets/git_config_info/tasks/main.yml
@@ -15,18 +15,27 @@
- block:
- include_tasks: get_simple_value.yml
loop:
- - { import_file: setup_global.yml, git_scope: 'global' }
- - { import_file: setup_file.yml, git_scope: 'file', git_file: "{{ remote_tmp_dir }}/gitconfig_file" }
+ - import_file: setup_global.yml
+ git_scope: 'global'
+ - import_file: setup_file.yml
+ git_scope: 'file'
+ git_file: "{{ remote_tmp_dir }}/gitconfig_file"
- include_tasks: get_multi_value.yml
loop:
- - { import_file: setup_global.yml, git_scope: 'global' }
- - { import_file: setup_file.yml, git_scope: 'file', git_file: "{{ remote_tmp_dir }}/gitconfig_file" }
+ - import_file: setup_global.yml
+ git_scope: 'global'
+ - import_file: setup_file.yml
+ git_scope: 'file'
+ git_file: "{{ remote_tmp_dir }}/gitconfig_file"
- include_tasks: get_all_values.yml
loop:
- - { import_file: setup_global.yml, git_scope: 'global' }
- - { import_file: setup_file.yml, git_scope: 'file', git_file: "{{ remote_tmp_dir }}/gitconfig_file" }
+ - import_file: setup_global.yml
+ git_scope: 'global'
+ - import_file: setup_file.yml
+ git_scope: 'file'
+ git_file: "{{ remote_tmp_dir }}/gitconfig_file"
- include_tasks: error_handling.yml
when: git_installed is succeeded and git_version.stdout is version(git_version_supporting_includes, ">=")
diff --git a/tests/integration/targets/github_app_access_token/tasks/main.yml b/tests/integration/targets/github_app_access_token/tasks/main.yml
index 9b7ba5d2c1..dbaa61d230 100644
--- a/tests/integration/targets/github_app_access_token/tasks/main.yml
+++ b/tests/integration/targets/github_app_access_token/tasks/main.yml
@@ -12,7 +12,7 @@
- name: Install JWT
ansible.builtin.pip:
name:
- - jwt
+ - jwt
- name: Read file
ansible.builtin.set_fact:
@@ -26,5 +26,5 @@
- assert:
that:
- - github_app_access_token is failed
- - '"Github return error" in github_app_access_token.msg'
+ - github_app_access_token is failed
+ - '"Github return error" in github_app_access_token.msg'
diff --git a/tests/integration/targets/proxmox_template/aliases b/tests/integration/targets/github_key/aliases
similarity index 92%
rename from tests/integration/targets/proxmox_template/aliases
rename to tests/integration/targets/github_key/aliases
index 5d9af81016..9ee6676643 100644
--- a/tests/integration/targets/proxmox_template/aliases
+++ b/tests/integration/targets/github_key/aliases
@@ -3,4 +3,4 @@
# SPDX-License-Identifier: GPL-3.0-or-later
unsupported
-proxmox_template
+destructive
diff --git a/tests/integration/targets/github_key/tasks/main.yml b/tests/integration/targets/github_key/tasks/main.yml
new file mode 100644
index 0000000000..d9bbf9d229
--- /dev/null
+++ b/tests/integration/targets/github_key/tasks/main.yml
@@ -0,0 +1,58 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the github_key module.
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test api_url parameter with GitHub.com
+ community.general.github_key:
+ token: "{{ fake_token }}"
+ name: "{{ test_key_name }}"
+ pubkey: "{{ test_pubkey }}"
+ state: present
+ api_url: "{{ github_api_url }}"
+ register: github_api_result
+ ignore_errors: true
+
+- name: Assert api_url parameter works with GitHub.com
+ assert:
+ that:
+ - github_api_result is failed
+ - '"Unauthorized" in github_api_result.msg or "401" in github_api_result.msg'
+
+- name: Test api_url parameter with GitHub Enterprise
+ community.general.github_key:
+ token: "{{ fake_token }}"
+ name: "{{ test_key_name }}"
+ pubkey: "{{ test_pubkey }}"
+ state: present
+ api_url: "{{ enterprise_api_url }}"
+ register: enterprise_api_result
+ ignore_errors: true
+
+- name: Assert api_url parameter works with GitHub Enterprise
+ assert:
+ that:
+ - enterprise_api_result is failed
+ - '"github.company.com" in enterprise_api_result.msg'
+
+- name: Test api_url with trailing slash
+ community.general.github_key:
+ token: "{{ fake_token }}"
+ name: "{{ test_key_name }}"
+ pubkey: "{{ test_pubkey }}"
+ state: present
+ api_url: "{{ enterprise_api_url_trailing }}"
+ register: trailing_slash_result
+ ignore_errors: true
+
+- name: Assert trailing slash is handled correctly
+ assert:
+ that:
+ - trailing_slash_result is failed
+ - '"github.company.com" in trailing_slash_result.msg'
diff --git a/tests/integration/targets/github_key/vars/main.yml b/tests/integration/targets/github_key/vars/main.yml
new file mode 100644
index 0000000000..23ac841f98
--- /dev/null
+++ b/tests/integration/targets/github_key/vars/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+fake_token: "fake_token_for_testing"
+test_key_name: "ansible-test-key"
+github_api_url: "https://api.github.com"
+enterprise_api_url: "https://github.company.com/api/v3"
+enterprise_api_url_trailing: "https://github.company.com/api/v3/"
+test_pubkey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTgvwjlRHZ8E1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ test@example.com"
diff --git a/tests/integration/targets/gitlab_branch/tasks/main.yml b/tests/integration/targets/gitlab_branch/tasks/main.yml
index 19d90e15cf..ee39f452fc 100644
--- a/tests/integration/targets/gitlab_branch/tasks/main.yml
+++ b/tests/integration/targets/gitlab_branch/tasks/main.yml
@@ -22,7 +22,7 @@
initialize_with_readme: true
state: present
-- name: Create branch {{ gitlab_branch }}
+- name: Create branch {{ gitlab_branch }}
community.general.gitlab_branch:
api_url: https://gitlab.com
api_token: secret_access_token
@@ -54,12 +54,12 @@
branch: "{{ gitlab_branch }}"
state: absent
register: delete_branch
-
+
- name: Test module is idempotent
assert:
that:
- delete_branch is changed
-
+
- name: Clean up {{ gitlab_project_name }}
gitlab_project:
server_url: "{{ gitlab_host }}"
diff --git a/tests/integration/targets/gitlab_group_access_token/defaults/main.yml b/tests/integration/targets/gitlab_group_access_token/defaults/main.yml
index 1b0dab2892..16d4208e08 100644
--- a/tests/integration/targets/gitlab_group_access_token/defaults/main.yml
+++ b/tests/integration/targets/gitlab_group_access_token/defaults/main.yml
@@ -8,8 +8,8 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-gitlab_api_token:
-gitlab_api_url:
+gitlab_api_token:
+gitlab_api_url:
gitlab_validate_certs: false
-gitlab_group_name:
-gitlab_token_name:
+gitlab_group_name:
+gitlab_token_name:
diff --git a/tests/integration/targets/gitlab_group_access_token/tasks/main.yml b/tests/integration/targets/gitlab_group_access_token/tasks/main.yml
index 4e6234238e..da2c9698f7 100644
--- a/tests/integration/targets/gitlab_group_access_token/tasks/main.yml
+++ b/tests/integration/targets/gitlab_group_access_token/tasks/main.yml
@@ -25,8 +25,8 @@
expires_at: '2025-01-01'
access_level: developer
scopes:
- - api
- - read_api
+ - api
+ - read_api
register: create_pfail_token_status
always:
- name: Assert that token creation in nonexisting group failed
@@ -47,8 +47,8 @@
expires_at: '2025-13-01'
access_level: developer
scopes:
- - api
- - read_api
+ - api
+ - read_api
register: create_efail_token_status
always:
- name: Assert that token creation with invalid expires_at failed
@@ -68,8 +68,8 @@
expires_at: '2024-12-31'
access_level: developer
scopes:
- - api
- - read_api
+ - api
+ - read_api
register: create_token_status
- name: Assert that token creation with valid arguments is successfull
assert:
@@ -88,8 +88,8 @@
expires_at: '2024-12-31'
access_level: developer
scopes:
- - api
- - read_api
+ - api
+ - read_api
register: check_token_status
- name: Assert that token creation without changes and recreate=never succeeds with status not changed
assert:
@@ -108,8 +108,8 @@
expires_at: '2024-12-31'
access_level: developer
scopes:
- - api
- - read_api
+ - api
+ - read_api
recreate: state_change
register: check_recreate_token_status
- name: Assert that token creation without changes and recreate=state_change succeeds with status not changed
@@ -130,8 +130,8 @@
expires_at: '2025-01-01'
access_level: developer
scopes:
- - api
- - read_api
+ - api
+ - read_api
register: change_token_status
always:
- name: Assert that token change with recreate=never fails
@@ -151,8 +151,8 @@
expires_at: '2025-01-01'
access_level: developer
scopes:
- - api
- - read_api
+ - api
+ - read_api
recreate: state_change
register: change_recreate_token_status
- name: Assert that token change with recreate=state_change succeeds
@@ -172,8 +172,8 @@
expires_at: '2025-01-01'
access_level: developer
scopes:
- - api
- - read_api
+ - api
+ - read_api
recreate: always
register: change_recreate1_token_status
- name: Assert that token change with recreate=always succeeds
@@ -193,8 +193,8 @@
expires_at: '2024-12-31'
access_level: developer
scopes:
- - api
- - read_api
+ - api
+ - read_api
register: revoke_token_status
- name: Assert that token revocation succeeds
assert:
@@ -212,8 +212,8 @@
expires_at: '2024-12-31'
access_level: developer
scopes:
- - api
- - read_api
+ - api
+ - read_api
register: revoke_token_status
- name: Assert that token revocation succeeds with status not changed
assert:
diff --git a/tests/integration/targets/gitlab_group_variable/tasks/main.yml b/tests/integration/targets/gitlab_group_variable/tasks/main.yml
index 39a3a5df8d..2627080e38 100644
--- a/tests/integration/targets/gitlab_group_variable/tasks/main.yml
+++ b/tests/integration/targets/gitlab_group_variable/tasks/main.yml
@@ -242,7 +242,7 @@
- gitlab_group_variable_state is changed
when: gitlab_premium_tests is defined
-- name: apply again the environment scope change
+- name: apply again the environment scope change
gitlab_group_variable:
api_url: "{{ gitlab_host }}"
api_token: "{{ gitlab_login_token }}"
diff --git a/tests/integration/targets/gitlab_instance_variable/tasks/main.yml b/tests/integration/targets/gitlab_instance_variable/tasks/main.yml
index 94a81698bc..36079e3957 100644
--- a/tests/integration/targets/gitlab_instance_variable/tasks/main.yml
+++ b/tests/integration/targets/gitlab_instance_variable/tasks/main.yml
@@ -438,7 +438,7 @@
- gitlab_instance_variable_state.instance_variable.removed|length == 0
- gitlab_instance_variable_state.instance_variable.updated|length == 0
# VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
- #- gitlab_instance_variable_state.instance_variable.added[0] == "my_test_var"
+ # - gitlab_instance_variable_state.instance_variable.added[0] == "my_test_var"
- name: change variable_type attribute
gitlab_instance_variable:
diff --git a/tests/integration/targets/gitlab_issue/tasks/main.yml b/tests/integration/targets/gitlab_issue/tasks/main.yml
index af1416c3dd..5667851f19 100644
--- a/tests/integration/targets/gitlab_issue/tasks/main.yml
+++ b/tests/integration/targets/gitlab_issue/tasks/main.yml
@@ -14,137 +14,137 @@
state: present
- block:
- - name: Create {{ gitlab_project_name }} project
- gitlab_project:
- api_url: "{{ gitlab_host }}"
- validate_certs: true
- api_token: "{{ gitlab_api_token }}"
- name: "{{ gitlab_project_name }}"
- group: "{{ gitlab_project_group }}"
- default_branch: "{{ gitlab_branch }}"
- initialize_with_readme: true
- state: present
+ - name: Create {{ gitlab_project_name }} project
+ gitlab_project:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: true
+ api_token: "{{ gitlab_api_token }}"
+ name: "{{ gitlab_project_name }}"
+ group: "{{ gitlab_project_group }}"
+ default_branch: "{{ gitlab_branch }}"
+ initialize_with_readme: true
+ state: present
- - name: Create Issue
- gitlab_issue:
- api_token: "{{ gitlab_api_token }}"
- api_url: "{{ gitlab_host }}"
- description: "Test description"
- project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
- state: present
- title: "Ansible test issue"
- register: gitlab_issue_create
+ - name: Create Issue
+ gitlab_issue:
+ api_token: "{{ gitlab_api_token }}"
+ api_url: "{{ gitlab_host }}"
+ description: "Test description"
+ project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
+ state: present
+ title: "Ansible test issue"
+ register: gitlab_issue_create
- - name: Test Issue Created
- assert:
- that:
- - gitlab_issue_create is changed
+ - name: Test Issue Created
+ assert:
+ that:
+ - gitlab_issue_create is changed
- - name: Create Issue ( Idempotency test )
- gitlab_issue:
- api_token: "{{ gitlab_api_token }}"
- api_url: "{{ gitlab_host }}"
- description: "Test description"
- project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
- state: present
- title: "Ansible test issue"
- register: gitlab_issue_create_idempotence
+ - name: Create Issue ( Idempotency test )
+ gitlab_issue:
+ api_token: "{{ gitlab_api_token }}"
+ api_url: "{{ gitlab_host }}"
+ description: "Test description"
+ project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
+ state: present
+ title: "Ansible test issue"
+ register: gitlab_issue_create_idempotence
- - name: Test Create Issue is Idempotent
- assert:
- that:
- - gitlab_issue_create_idempotence is not changed
+ - name: Test Create Issue is Idempotent
+ assert:
+ that:
+ - gitlab_issue_create_idempotence is not changed
- - name: Update Issue Test ( Additions )
- gitlab_issue:
- api_token: "{{ gitlab_api_token }}"
- api_url: "{{ gitlab_host }}"
- assignee_ids: "{{ gitlab_assignee_ids }}"
- description_path: "{{ gitlab_description_path }}"
- labels: "{{ gitlab_labels }}"
- milestone_search: "{{ gitlab_milestone_search }}"
- milestone_group_id: "{{ gitlab_milestone_group_id }}"
- project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
- state: present
- title: "Ansible test issue"
- register: gitlab_issue_update_additions
+ - name: Update Issue Test ( Additions )
+ gitlab_issue:
+ api_token: "{{ gitlab_api_token }}"
+ api_url: "{{ gitlab_host }}"
+ assignee_ids: "{{ gitlab_assignee_ids }}"
+ description_path: "{{ gitlab_description_path }}"
+ labels: "{{ gitlab_labels }}"
+ milestone_search: "{{ gitlab_milestone_search }}"
+ milestone_group_id: "{{ gitlab_milestone_group_id }}"
+ project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
+ state: present
+ title: "Ansible test issue"
+ register: gitlab_issue_update_additions
- - name: Test Issue Updated ( Additions )
- assert:
- that:
- - gitlab_issue_update_additions.issue.labels[0] == "{{ gitlab_labels[0] }}"
- - gitlab_issue_update_additions.issue.assignees[0].username == "{{ gitlab_assignee_ids[0] }}"
- - "'### Description\n\nIssue test description' in gitlab_issue_update_additions.issue.description"
- - gitlab_issue_update_additions.issue.milestone.title == "{{ gitlab_milestone_search }}"
+ - name: Test Issue Updated ( Additions )
+ assert:
+ that:
+ - gitlab_issue_update_additions.issue.labels[0] == "{{ gitlab_labels[0] }}"
+ - gitlab_issue_update_additions.issue.assignees[0].username == "{{ gitlab_assignee_ids[0] }}"
+ - "'### Description\n\nIssue test description' in gitlab_issue_update_additions.issue.description"
+ - gitlab_issue_update_additions.issue.milestone.title == "{{ gitlab_milestone_search }}"
- - name: Update Issue Test ( Persistence )
- gitlab_issue:
- api_token: "{{ gitlab_api_token }}"
- api_url: "{{ gitlab_host }}"
- description_path: "{{ gitlab_description_path }}"
- milestone_search: "{{ gitlab_milestone_search }}"
- milestone_group_id: "{{ gitlab_milestone_group_id }}"
- project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
- state: present
- title: "Ansible test issue"
- register: gitlab_issue_update_persistence
+ - name: Update Issue Test ( Persistence )
+ gitlab_issue:
+ api_token: "{{ gitlab_api_token }}"
+ api_url: "{{ gitlab_host }}"
+ description_path: "{{ gitlab_description_path }}"
+ milestone_search: "{{ gitlab_milestone_search }}"
+ milestone_group_id: "{{ gitlab_milestone_group_id }}"
+ project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
+ state: present
+ title: "Ansible test issue"
+ register: gitlab_issue_update_persistence
- - name: Test issue Not Updated ( Persistence )
- assert:
- that:
- - gitlab_issue_update_persistence.issue.labels[0] == "{{ gitlab_labels[0] }}"
- - gitlab_issue_update_persistence.issue.assignees[0].username == "{{ gitlab_assignee_ids[0] }}"
+ - name: Test issue Not Updated ( Persistence )
+ assert:
+ that:
+ - gitlab_issue_update_persistence.issue.labels[0] == "{{ gitlab_labels[0] }}"
+ - gitlab_issue_update_persistence.issue.assignees[0].username == "{{ gitlab_assignee_ids[0] }}"
- - name: Update Issue Test ( Removals )
- gitlab_issue:
- api_token: "{{ gitlab_api_token }}"
- api_url: "{{ gitlab_host }}"
- assignee_ids: []
- description_path: "{{ gitlab_description_path }}"
- labels: []
- milestone_search: ""
- milestone_group_id: ""
- project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
- state: present
- title: "Ansible test issue"
- register: gitlab_issue_update_removal
+ - name: Update Issue Test ( Removals )
+ gitlab_issue:
+ api_token: "{{ gitlab_api_token }}"
+ api_url: "{{ gitlab_host }}"
+ assignee_ids: []
+ description_path: "{{ gitlab_description_path }}"
+ labels: []
+ milestone_search: ""
+ milestone_group_id: ""
+ project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
+ state: present
+ title: "Ansible test issue"
+ register: gitlab_issue_update_removal
- - name: Test issue updated
- assert:
- that:
- - gitlab_issue_update_removal.issue.labels == []
- - gitlab_issue_update_removal.issue.assignees == []
- - gitlab_issue_update_removal.issue.milestone == None
+ - name: Test issue updated
+ assert:
+ that:
+ - gitlab_issue_update_removal.issue.labels == []
+ - gitlab_issue_update_removal.issue.assignees == []
+ - gitlab_issue_update_removal.issue.milestone == None
- - name: Delete Issue
- gitlab_issue:
- api_url: "{{ gitlab_host }}"
- api_token: "{{ gitlab_api_token }}"
- project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
- title: "Ansible test issue"
- state: absent
- register: gitlab_issue_delete
+ - name: Delete Issue
+ gitlab_issue:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_api_token }}"
+ project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
+ title: "Ansible test issue"
+ state: absent
+ register: gitlab_issue_delete
- - name: Test issue is deleted
- assert:
- that:
- - gitlab_issue_delete is changed
+ - name: Test issue is deleted
+ assert:
+ that:
+ - gitlab_issue_delete is changed
always:
- - name: Delete Issue
- gitlab_issue:
- api_url: "{{ gitlab_host }}"
- api_token: "{{ gitlab_api_token }}"
- project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
- title: "Ansible test issue"
- state_filter: "opened"
- state: absent
- register: gitlab_issue_delete
- - name: Clean up {{ gitlab_project_name }}
- gitlab_project:
- api_url: "{{ gitlab_host }}"
- validate_certs: false
- api_token: "{{ gitlab_api_token }}"
- name: "{{ gitlab_project_name }}"
- group: "{{ gitlab_project_group }}"
- state: absent
+ - name: Delete Issue
+ gitlab_issue:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_api_token }}"
+ project: "{{ gitlab_project_group }}/{{ gitlab_project_name }}"
+ title: "Ansible test issue"
+ state_filter: "opened"
+ state: absent
+ register: gitlab_issue_delete
+ - name: Clean up {{ gitlab_project_name }}
+ gitlab_project:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ name: "{{ gitlab_project_name }}"
+ group: "{{ gitlab_project_group }}"
+ state: absent
diff --git a/tests/integration/targets/gitlab_label/README.md b/tests/integration/targets/gitlab_label/README.md
index e27cb74c8c..06e662749f 100644
--- a/tests/integration/targets/gitlab_label/README.md
+++ b/tests/integration/targets/gitlab_label/README.md
@@ -1,4 +1,4 @@
-
+
+The integration test can be performed as follows:
+
+```
+# 1. Start docker-compose:
+docker-compose -f tests/integration/targets/jenkins_credential/docker-compose.yml down
+docker-compose -f tests/integration/targets/jenkins_credential/docker-compose.yml up -d
+
+# 2. Run the integration tests:
+ansible-test integration jenkins_credential --allow-unsupported -v
+```
diff --git a/tests/integration/targets/jenkins_credential/aliases b/tests/integration/targets/jenkins_credential/aliases
new file mode 100644
index 0000000000..d2086eecf8
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
\ No newline at end of file
diff --git a/tests/integration/targets/jenkins_credential/docker-compose.yml b/tests/integration/targets/jenkins_credential/docker-compose.yml
new file mode 100644
index 0000000000..c99c9ed575
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/docker-compose.yml
@@ -0,0 +1,21 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: "3.8"
+
+services:
+ jenkins:
+ image: bitnami/jenkins
+ container_name: jenkins-test
+ ports:
+ - "8080:8080"
+ environment:
+ JENKINS_USERNAME: "FishLegs"
+ JENKINS_PASSWORD: "MeatLug"
+ JENKINS_PLUGINS: "credentials,cloudbees-folder,plain-credentials,github-branch-source,github-api,scm-api,workflow-step-api"
+ healthcheck:
+ test: curl -s http://localhost:8080/login || exit 1
+ interval: 10s
+ timeout: 10s
+ retries: 10
diff --git a/tests/integration/targets/jenkins_credential/tasks/add.yml b/tests/integration/targets/jenkins_credential/tasks/add.yml
new file mode 100644
index 0000000000..c956773454
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/tasks/add.yml
@@ -0,0 +1,169 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Add CUSTOM scope (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "CUSTOM"
+ type: "scope"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "Custom scope credential"
+ inc_path:
+ - "include/path"
+ - "include/path2"
+ exc_path:
+ - "exclude/path"
+ - "exclude/path2"
+ inc_hostname:
+ - "included-hostname"
+ - "included-hostname2"
+ exc_hostname:
+ - "excluded-hostname"
+ - "excluded-hostname2"
+ schemes:
+ - "http"
+ - "https"
+ inc_hostname_port:
+ - "included-hostname:7000"
+ - "included-hostname2:7000"
+ exc_hostname_port:
+ - "excluded-hostname:7000"
+ - "excluded-hostname2:7000"
+ register: custom_scope
+
+- name: Assert CUSTOM scope changed value
+ assert:
+ that:
+ - custom_scope.changed == (run_number == 1)
+ fail_msg: "CUSTOM scope changed status incorrect on run {{ run_number }}"
+ success_msg: "CUSTOM scope behaved correctly on run {{ run_number }}"
+
+- name: Add user_and_pass credential (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "userpass-id"
+ type: "user_and_pass"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "User and password credential"
+ username: "user1"
+ password: "pass1"
+ register: userpass_cred
+
+- name: Assert user_and_pass changed value
+ assert:
+ that:
+ - userpass_cred.changed == (run_number == 1)
+ fail_msg: "user_and_pass credential changed status incorrect on run {{ run_number }}"
+ success_msg: "user_and_pass credential behaved correctly on run {{ run_number }}"
+
+- name: Add file credential to custom scope (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "file-id"
+ type: "file"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ scope: "CUSTOM"
+ description: "File credential"
+ file_path: "{{ output_dir }}/my-secret.pem"
+ register: file_cred
+
+- name: Assert file credential changed value
+ assert:
+ that:
+ - file_cred.changed == (run_number == 1)
+ fail_msg: "file credential changed status incorrect on run {{ run_number }}"
+ success_msg: "file credential behaved correctly on run {{ run_number }}"
+
+- name: Add text credential to folder (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "text-id"
+ type: "text"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "Text credential"
+ secret: "mysecrettext"
+ location: "folder"
+ url: "http://localhost:8080/job/test"
+ register: text_cred
+
+- name: Assert text credential changed value
+ assert:
+ that:
+ - text_cred.changed == (run_number == 1)
+ fail_msg: "text credential changed status incorrect on run {{ run_number }}"
+ success_msg: "text credential behaved correctly on run {{ run_number }}"
+
+- name: Add githubApp credential (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "githubapp-id"
+ type: "github_app"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "GitHub app credential"
+ appID: "12345"
+ private_key_path: "{{ output_dir }}/github.pem"
+ owner: "github_owner"
+ register: githubapp_cred
+
+- name: Assert githubApp credential changed value
+ assert:
+ that:
+ - githubapp_cred.changed == (run_number == 1)
+ fail_msg: "githubApp credential changed status incorrect on run {{ run_number }}"
+ success_msg: "githubApp credential behaved correctly on run {{ run_number }}"
+
+- name: Add sshKey credential (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "sshkey-id"
+ type: "ssh_key"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "SSH key credential"
+ username: "sshuser"
+ private_key_path: "{{ output_dir }}/ssh_key"
+ passphrase: 1234
+ register: sshkey_cred
+
+- name: Assert sshKey credential changed value
+ assert:
+ that:
+ - sshkey_cred.changed == (run_number == 1)
+ fail_msg: "sshKey credential changed status incorrect on run {{ run_number }}"
+ success_msg: "sshKey credential behaved correctly on run {{ run_number }}"
+
+- name: Add certificate (p12) credential (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "certificate-id"
+ type: "certificate"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "Certificate credential"
+ password: "12345678901234"
+ file_path: "{{ output_dir }}/certificate.p12"
+ register: cert_p12_cred
+
+- name: Assert certificate (p12) credential changed value
+ assert:
+ that:
+ - cert_p12_cred.changed == (run_number == 1)
+ fail_msg: "certificate (p12) credential changed status incorrect on run {{ run_number }}"
+ success_msg: "certificate (p12) credential behaved correctly on run {{ run_number }}"
+
+- name: Add certificate (pem) credential (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "certificate-id-pem"
+ type: "certificate"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "Certificate credential (pem)"
+ file_path: "{{ output_dir }}/cert.pem"
+ private_key_path: "{{ output_dir }}/private.key"
+ register: cert_pem_cred
+
+- name: Assert certificate (pem) credential changed value
+ assert:
+ that:
+ - cert_pem_cred.changed == (run_number == 1)
+ fail_msg: "certificate (pem) credential changed status incorrect on run {{ run_number }}"
+ success_msg: "certificate (pem) credential behaved correctly on run {{ run_number }}"
diff --git a/tests/integration/targets/jenkins_credential/tasks/del.yml b/tests/integration/targets/jenkins_credential/tasks/del.yml
new file mode 100644
index 0000000000..036b65d3a1
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/tasks/del.yml
@@ -0,0 +1,128 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Delete user_and_pass credential (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "userpass-id"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ state: "absent"
+ register: userpass_cred
+
+- name: Assert user_and_pass changed value
+ assert:
+ that:
+ - userpass_cred.changed == (run_number == 1)
+ fail_msg: "user_and_pass credential changed status incorrect on run {{ run_number }}"
+ success_msg: "user_and_pass credential behaved correctly on run {{ run_number }}"
+
+- name: Delete file credential to custom scope (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "file-id"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ scope: "CUSTOM"
+ state: "absent"
+ register: file_cred
+
+- name: Assert file credential changed value
+ assert:
+ that:
+ - file_cred.changed == (run_number == 1)
+ fail_msg: "file credential changed status incorrect on run {{ run_number }}"
+ success_msg: "file credential behaved correctly on run {{ run_number }}"
+
+- name: Delete CUSTOM scope credential (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "CUSTOM"
+ type: "scope"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ state: "absent"
+ register: custom_scope
+
+- name: Assert CUSTOM scope changed value
+ assert:
+ that:
+ - custom_scope.changed == (run_number == 1)
+ fail_msg: "CUSTOM scope changed status incorrect on run {{ run_number }}"
+ success_msg: "CUSTOM scope behaved correctly on run {{ run_number }}"
+
+- name: Delete text credential to folder (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "text-id"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ state: "absent"
+ location: "folder"
+ url: "http://localhost:8080/job/test"
+ register: text_cred
+
+- name: Assert text credential changed value
+ assert:
+ that:
+ - text_cred.changed == (run_number == 1)
+ fail_msg: "text credential changed status incorrect on run {{ run_number }}"
+ success_msg: "text credential behaved correctly on run {{ run_number }}"
+
+- name: Delete githubApp credential (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "githubapp-id"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ state: "absent"
+ register: githubapp_cred
+
+- name: Assert githubApp credential changed value
+ assert:
+ that:
+ - githubapp_cred.changed == (run_number == 1)
+ fail_msg: "githubApp credential changed status incorrect on run {{ run_number }}"
+ success_msg: "githubApp credential behaved correctly on run {{ run_number }}"
+
+- name: Delete sshKey credential (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "sshkey-id"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "SSH key credential"
+ state: "absent"
+ register: sshkey_cred
+
+- name: Assert sshKey credential changed value
+ assert:
+ that:
+ - sshkey_cred.changed == (run_number == 1)
+ fail_msg: "sshKey credential changed status incorrect on run {{ run_number }}"
+ success_msg: "sshKey credential behaved correctly on run {{ run_number }}"
+
+- name: Delete certificate credential (p12) (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "certificate-id"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ state: "absent"
+ register: cert_p12_cred
+
+- name: Assert certificate (p12) credential changed value
+ assert:
+ that:
+ - cert_p12_cred.changed == (run_number == 1)
+ fail_msg: "certificate (p12) credential changed status incorrect on run {{ run_number }}"
+ success_msg: "certificate (p12) credential behaved correctly on run {{ run_number }}"
+
+- name: Delete certificate credential (pem) (run {{ run_number }})
+ community.general.jenkins_credential:
+ id: "certificate-id-pem"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ state: "absent"
+ register: cert_pem_cred
+
+- name: Assert certificate (pem) credential changed value
+ assert:
+ that:
+ - cert_pem_cred.changed == (run_number == 1)
+ fail_msg: "certificate (pem) credential changed status incorrect on run {{ run_number }}"
+ success_msg: "certificate (pem) credential behaved correctly on run {{ run_number }}"
diff --git a/tests/integration/targets/jenkins_credential/tasks/edit.yml b/tests/integration/targets/jenkins_credential/tasks/edit.yml
new file mode 100644
index 0000000000..bd8d1eff7b
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/tasks/edit.yml
@@ -0,0 +1,192 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Generate token
+ community.general.jenkins_credential:
+ id: "{{ tokenUuid }}"
+ name: "test-token-2"
+ jenkins_user: "{{ jenkins_username }}"
+ jenkins_password: "{{ jenkins_password }}"
+ type: "token"
+ force: true
+ register: token_result
+
+- name: Set token in vars
+ set_fact:
+ token: "{{ token_result.token }}"
+ tokenUuid: "{{ token_result.token_uuid }}"
+
+- name: Edit CUSTOM scope credential
+ community.general.jenkins_credential:
+ id: "CUSTOM"
+ type: "scope"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "New custom scope credential"
+ inc_path:
+ - "new_include/path"
+ - "new_include/path2"
+ exc_path:
+ - "new_exclude/path"
+ - "new_exclude/path2"
+ inc_hostname:
+ - "new_included-hostname"
+ - "new_included-hostname2"
+ exc_hostname:
+ - "new_excluded-hostname"
+ - "new_excluded-hostname2"
+ schemes:
+ - "new_http"
+ - "new_https"
+ inc_hostname_port:
+ - "new_included-hostname:7000"
+ - "new_included-hostname2:7000"
+ exc_hostname_port:
+ - "new_excluded-hostname:7000"
+ - "new_excluded-hostname2:7000"
+ force: true
+ register: custom_scope
+
+- name: Assert CUSTOM scope changed value
+ assert:
+ that:
+ - custom_scope.changed == true
+ fail_msg: "CUSTOM scope changed status when it shouldn't"
+ success_msg: "CUSTOM scope behaved correctly"
+
+- name: Edit user_and_pass credential
+ community.general.jenkins_credential:
+ id: "userpass-id"
+ type: "user_and_pass"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "new user and password credential"
+ username: "user2"
+ password: "pass2"
+ force: true
+ register: userpass_cred
+
+- name: Assert user_and_pass changed value
+ assert:
+ that:
+ - userpass_cred.changed == true
+ fail_msg: "user_and_pass credential changed status incorrect"
+ success_msg: "user_and_pass credential behaved correctly"
+
+- name: Edit file credential to custom scope
+ community.general.jenkins_credential:
+ id: "file-id"
+ type: "file"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ scope: "CUSTOM"
+ description: "New file credential"
+ file_path: "{{ output_dir }}/my-secret.pem"
+ force: true
+ register: file_cred
+
+- name: Assert file credential changed value
+ assert:
+ that:
+ - file_cred.changed == true
+ fail_msg: "file credential changed status incorrect"
+ success_msg: "file credential behaved correctly"
+
+- name: Edit text credential to folder
+ community.general.jenkins_credential:
+ id: "text-id"
+ type: "text"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "New text credential"
+ secret: "mynewsecrettext"
+ location: "folder"
+ url: "http://localhost:8080/job/test"
+ force: true
+ register: text_cred
+
+- name: Assert text credential changed value
+ assert:
+ that:
+ - text_cred.changed == true
+ fail_msg: "text credential changed status incorrect"
+ success_msg: "text credential behaved correctly"
+
+- name: Edit githubApp credential
+ community.general.jenkins_credential:
+ id: "githubapp-id"
+ type: "github_app"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "New GitHub app credential"
+ appID: "12345678"
+ private_key_path: "{{ output_dir }}/github.pem"
+ owner: "new_github_owner"
+ force: true
+ register: githubapp_cred
+
+- name: Assert githubApp credential changed value
+ assert:
+ that:
+ - githubapp_cred.changed == true
+ fail_msg: "githubApp credential changed status incorrect"
+ success_msg: "githubApp credential behaved correctly"
+
+- name: Edit sshKey credential
+ community.general.jenkins_credential:
+ id: "sshkey-id"
+ type: "ssh_key"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "New SSH key credential"
+ username: "new_sshuser"
+ private_key_path: "{{ output_dir }}/ssh_key"
+ passphrase: 1234
+ force: true
+ register: sshkey_cred
+
+- name: Assert sshKey credential changed value
+ assert:
+ that:
+ - sshkey_cred.changed == true
+ fail_msg: "sshKey credential changed status incorrect"
+ success_msg: "sshKey credential behaved correctly"
+
+- name: Edit certificate credential (p12)
+ community.general.jenkins_credential:
+ id: "certificate-id"
+ type: "certificate"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "New certificate credential"
+ password: "12345678901234"
+ file_path: "{{ output_dir }}/certificate.p12"
+ force: true
+ register: cert_p12_cred
+
+- name: Assert certificate (p12) credential changed value
+ assert:
+ that:
+ - cert_p12_cred.changed == true
+ fail_msg: "certificate (p12) credential changed status incorrect"
+ success_msg: "certificate (p12) credential behaved correctly"
+
+- name: Edit certificate credential (pem)
+ community.general.jenkins_credential:
+ id: "certificate-id-pem"
+ type: "certificate"
+ jenkins_user: "{{ jenkins_username }}"
+ token: "{{ token }}"
+ description: "New certificate credential (pem)"
+ file_path: "{{ output_dir }}/cert.pem"
+ private_key_path: "{{ output_dir }}/private.key"
+ force: true
+ register: cert_pem_cred
+
+- name: Assert certificate (pem) credential changed value
+ assert:
+ that:
+ - cert_pem_cred.changed == true
+ fail_msg: "certificate (pem) credential changed status incorrect"
+ success_msg: "certificate (pem) credential behaved correctly"
diff --git a/tests/integration/targets/jenkins_credential/tasks/main.yml b/tests/integration/targets/jenkins_credential/tasks/main.yml
new file mode 100644
index 0000000000..88ee0693cf
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/tasks/main.yml
@@ -0,0 +1,79 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Prepare the test environment
+ include_tasks: pre.yml
+ vars:
+ output_dir: "{{ playbook_dir }}/generated"
+
+- name: Generate token
+ community.general.jenkins_credential:
+ name: "test-token"
+ jenkins_user: "{{ jenkins_username }}"
+ jenkins_password: "{{ jenkins_password }}"
+ type: "token"
+ no_log: true
+ register: token_result
+
+- name: Assert token and tokenUuid are returned
+ assert:
+ that:
+ - token_result.token is defined
+ - token_result.token_uuid is defined
+ fail_msg: "Token generation failed"
+ success_msg: "Token and tokenUuid successfully returned"
+
+- name: Set token facts
+ set_fact:
+ token: "{{ token_result.token }}"
+ tokenUuid: "{{ token_result.token_uuid }}"
+
+- name: Test adding new credentials and scopes
+ include_tasks: add.yml
+ vars:
+ run_number: 1
+ output_dir: "{{ playbook_dir }}/generated"
+
+- name: Test adding credentials and scopes when they already exist
+ include_tasks: add.yml
+ vars:
+ run_number: 2
+ output_dir: "{{ playbook_dir }}/generated"
+
+- name: Test editing credentials
+ include_tasks: edit.yml
+ vars:
+ output_dir: "{{ playbook_dir }}/generated"
+
+- name: Test deleting credentials and scopes
+ include_tasks: del.yml
+ vars:
+ run_number: 1
+
+- name: Test deleting credentials and scopes when they don't exist
+ include_tasks: del.yml
+ vars:
+ run_number: 2
+
+- name: Delete token
+ community.general.jenkins_credential:
+ id: "{{ tokenUuid }}"
+ name: "test-token-2"
+ jenkins_user: "{{ jenkins_username }}"
+ jenkins_password: "{{ jenkins_password }}"
+ state: "absent"
+ type: "token"
+ register: delete_token_result
+
+- name: Assert token deletion
+ assert:
+ that:
+ - delete_token_result.changed is true
+ fail_msg: "Token deletion failed"
+ success_msg: "Token successfully deleted"
+
+- name: Remove generated test files
+ ansible.builtin.file:
+ path: "{{ playbook_dir }}/generated"
+ state: absent
diff --git a/tests/integration/targets/jenkins_credential/tasks/pre.yml b/tests/integration/targets/jenkins_credential/tasks/pre.yml
new file mode 100644
index 0000000000..abb649ae1e
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/tasks/pre.yml
@@ -0,0 +1,92 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Include Jenkins user variables
+ include_vars: "{{ role_path }}/vars/credentials.yml"
+
+- name: Make sure Jenkins is ready
+ uri:
+ url: http://localhost:8080/login
+ status_code: 200
+ return_content: false
+ timeout: 30
+ register: result
+ retries: 10
+ delay: 5
+ until: result.status == 200
+
+- name: Get Jenkins crumb and save cookie
+ shell: |
+ curl -s -c cookies.txt -u FishLegs:MeatLug http://localhost:8080/crumbIssuer/api/json > crumb.json
+ args:
+ executable: /bin/bash
+
+- name: Read crumb value
+ set_fact:
+ crumb_data: "{{ lookup('file', 'crumb.json') | from_json }}"
+
+- name: Create Jenkins folder 'test'
+ shell: |
+ curl -b cookies.txt -u {{ jenkins_username }}:{{ jenkins_password }} \
+ -H "{{ crumb_data.crumbRequestField }}: {{ crumb_data.crumb }}" \
+ -H "Content-Type: application/xml" \
+ --data-binary @- http://localhost:8080/createItem?name=test <<EOF
+ <com.cloudbees.hudson.plugins.folder.Folder>
+ <description>Test Folder</description>
+ </com.cloudbees.hudson.plugins.folder.Folder>
+ EOF
+ args:
+ executable: /bin/bash
+
+- name: Create output directory
+ ansible.builtin.file:
+ path: "{{ output_dir }}"
+ state: directory
+ mode: "0755"
+
+- name: Generate private key
+ community.crypto.openssl_privatekey:
+ path: "{{ output_dir }}/private.key"
+ size: 2048
+ type: RSA
+
+- name: Generate CSR (certificate signing request)
+ community.crypto.openssl_csr:
+ path: "{{ output_dir }}/request.csr"
+ privatekey_path: "{{ output_dir }}/private.key"
+ common_name: "dummy.local"
+
+- name: Generate self-signed certificate
+ community.crypto.x509_certificate:
+ path: "{{ output_dir }}/cert.pem"
+ privatekey_path: "{{ output_dir }}/private.key"
+ csr_path: "{{ output_dir }}/request.csr"
+ provider: selfsigned
+
+- name: Create PKCS#12 (.p12) file
+ community.crypto.openssl_pkcs12:
+ path: "{{ output_dir }}/certificate.p12"
+ privatekey_path: "{{ output_dir }}/private.key"
+ certificate_path: "{{ output_dir }}/cert.pem"
+ friendly_name: "dummy-cert"
+ passphrase: "12345678901234"
+
+- name: Copy cert.pem to github.pem
+ ansible.builtin.copy:
+ src: "{{ output_dir }}/cert.pem"
+ dest: "{{ output_dir }}/github.pem"
+ remote_src: true
+
+- name: Copy private.key to my-secret.pem
+ ansible.builtin.copy:
+ src: "{{ output_dir }}/private.key"
+ dest: "{{ output_dir }}/my-secret.pem"
+ remote_src: true
+
+- name: Generate dummy SSH key
+ community.crypto.openssh_keypair:
+ path: "{{ output_dir }}/ssh_key"
+ type: rsa
+ size: 2048
diff --git a/tests/integration/targets/jenkins_credential/vars/credentials.yml b/tests/integration/targets/jenkins_credential/vars/credentials.yml
new file mode 100644
index 0000000000..27df98700b
--- /dev/null
+++ b/tests/integration/targets/jenkins_credential/vars/credentials.yml
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+jenkins_username: FishLegs
+jenkins_password: MeatLug
diff --git a/tests/integration/targets/kdeconfig/tasks/main.yml b/tests/integration/targets/kdeconfig/tasks/main.yml
index 790bb378dc..f2656c5c98 100644
--- a/tests/integration/targets/kdeconfig/tasks/main.yml
+++ b/tests/integration/targets/kdeconfig/tasks/main.yml
@@ -17,7 +17,7 @@
copy:
dest: "{{ kwriteconf_fake }}"
src: kwriteconf_fake
- mode: 0755
+ mode: "0755"
- name: Simple test
kdeconfig:
@@ -182,7 +182,7 @@
values:
- group: test
key: test1
- bool_value: on
+ bool_value: true
kwriteconfig_path: "{{ kwriteconf_fake }}"
register: result_bool_idem
ignore_errors: true
@@ -207,7 +207,7 @@
value: test2
- groups: [testx, testy]
key: testz
- bool_value: on
+ bool_value: true
kwriteconfig_path: "{{ kwriteconf_fake }}"
register: result_checkmode
ignore_errors: true
@@ -236,7 +236,7 @@
value: test2
- groups: [testx, testy]
key: testz
- bool_value: on
+ bool_value: true
kwriteconfig_path: "{{ kwriteconf_fake }}"
register: result_checkmode_apply
ignore_errors: true
@@ -260,7 +260,7 @@
value: test2
- groups: [testx, testy]
key: testz
- bool_value: on
+ bool_value: true
kwriteconfig_path: "{{ kwriteconf_fake }}"
register: result_checkmode2
ignore_errors: true
diff --git a/tests/integration/targets/kernel_blacklist/handlers/main.yml b/tests/integration/targets/kernel_blacklist/handlers/main.yml
index 814c9c51a8..ca97688005 100644
--- a/tests/integration/targets/kernel_blacklist/handlers/main.yml
+++ b/tests/integration/targets/kernel_blacklist/handlers/main.yml
@@ -7,4 +7,3 @@
ansible.builtin.file:
path: /etc/modprobe.d
state: absent
-
\ No newline at end of file
diff --git a/tests/integration/targets/kernel_blacklist/tasks/main.yml b/tests/integration/targets/kernel_blacklist/tasks/main.yml
index 61d766d48e..aecc9b68d5 100644
--- a/tests/integration/targets/kernel_blacklist/tasks/main.yml
+++ b/tests/integration/targets/kernel_blacklist/tasks/main.yml
@@ -65,7 +65,7 @@
- name: test deprecation
assert:
that:
- - "'deprecations' not in bl_test_1 or (ansible_version.major == 2 and ansible_version.minor == 12)"
+ - "'deprecations' not in bl_test_1"
- name: add new item to list
community.general.kernel_blacklist:
diff --git a/tests/integration/targets/keycloak_authentication/tasks/main.yml b/tests/integration/targets/keycloak_authentication/tasks/main.yml
index d286b70a35..cfb193848c 100644
--- a/tests/integration/targets/keycloak_authentication/tasks/main.yml
+++ b/tests/integration/targets/keycloak_authentication/tasks/main.yml
@@ -29,7 +29,7 @@
uri:
url: "{{ url }}/admin/"
status_code: 200
- validate_certs: no
+ validate_certs: false
register: result
until: result.status == 200
retries: 10
@@ -66,9 +66,9 @@
authenticationExecutions:
- providerId: "idp-review-profile"
requirement: "REQUIRED"
- authenticationConfig:
+ authenticationConfig:
alias: "Test review profile config"
- config:
+ config:
update.profile.on.first.login: "missing"
- name: Create auth flow
@@ -82,30 +82,30 @@
description: "browser based authentication with otp"
providerId: "basic-flow"
authenticationExecutions:
- - displayName: Cookie
- providerId: auth-cookie
- requirement: ALTERNATIVE
- - displayName: Kerberos
- providerId: auth-spnego
- requirement: DISABLED
- - displayName: Identity Provider Redirector
- providerId: identity-provider-redirector
- requirement: ALTERNATIVE
- - displayName: My browser otp forms
- requirement: ALTERNATIVE
- - displayName: Username Password Form
- flowAlias: My browser otp forms
- providerId: auth-username-password-form
- requirement: REQUIRED
- - displayName: My browser otp Browser - Conditional OTP
- flowAlias: My browser otp forms
- requirement: REQUIRED
- providerId: "auth-conditional-otp-form"
- authenticationConfig:
- alias: my-conditional-otp-config
- config:
- defaultOtpOutcome: "force"
- noOtpRequiredForHeaderPattern: "{{ keycloak_no_otp_required_pattern_orinale }}"
+ - displayName: Cookie
+ providerId: auth-cookie
+ requirement: ALTERNATIVE
+ - displayName: Kerberos
+ providerId: auth-spnego
+ requirement: DISABLED
+ - displayName: Identity Provider Redirector
+ providerId: identity-provider-redirector
+ requirement: ALTERNATIVE
+ - displayName: My browser otp forms
+ requirement: ALTERNATIVE
+ - displayName: Username Password Form
+ flowAlias: My browser otp forms
+ providerId: auth-username-password-form
+ requirement: REQUIRED
+ - displayName: My browser otp Browser - Conditional OTP
+ flowAlias: My browser otp forms
+ requirement: REQUIRED
+ providerId: "auth-conditional-otp-form"
+ authenticationConfig:
+ alias: my-conditional-otp-config
+ config:
+ defaultOtpOutcome: "force"
+ noOtpRequiredForHeaderPattern: "{{ keycloak_no_otp_required_pattern_orinale }}"
state: present
- name: Modified auth flow with new config
@@ -119,30 +119,30 @@
description: "browser based authentication with otp"
providerId: "basic-flow"
authenticationExecutions:
- - displayName: Cookie
- providerId: auth-cookie
- requirement: ALTERNATIVE
- - displayName: Kerberos
- providerId: auth-spnego
- requirement: DISABLED
- - displayName: Identity Provider Redirector
- providerId: identity-provider-redirector
- requirement: ALTERNATIVE
- - displayName: My browser otp forms
- requirement: ALTERNATIVE
- - displayName: Username Password Form
- flowAlias: My browser otp forms
- providerId: auth-username-password-form
- requirement: REQUIRED
- - displayName: My browser otp Browser - Conditional OTP
- flowAlias: My browser otp forms
- requirement: REQUIRED
- providerId: "auth-conditional-otp-form"
- authenticationConfig:
- alias: my-conditional-otp-config
- config:
- defaultOtpOutcome: "force"
- noOtpRequiredForHeaderPattern: "{{ keycloak_no_otp_required_pattern_modifed }}"
+ - displayName: Cookie
+ providerId: auth-cookie
+ requirement: ALTERNATIVE
+ - displayName: Kerberos
+ providerId: auth-spnego
+ requirement: DISABLED
+ - displayName: Identity Provider Redirector
+ providerId: identity-provider-redirector
+ requirement: ALTERNATIVE
+ - displayName: My browser otp forms
+ requirement: ALTERNATIVE
+ - displayName: Username Password Form
+ flowAlias: My browser otp forms
+ providerId: auth-username-password-form
+ requirement: REQUIRED
+ - displayName: My browser otp Browser - Conditional OTP
+ flowAlias: My browser otp forms
+ requirement: REQUIRED
+ providerId: "auth-conditional-otp-form"
+ authenticationConfig:
+ alias: my-conditional-otp-config
+ config:
+ defaultOtpOutcome: "force"
+ noOtpRequiredForHeaderPattern: "{{ keycloak_no_otp_required_pattern_modifed }}"
state: present
register: result
@@ -166,7 +166,7 @@
- name: Assert `my-conditional-otp-config` exists only once
ansible.builtin.assert:
that:
- - exported_realm.json | community.general.json_query('authenticatorConfig[?alias==`my-conditional-otp-config`]') | length == 1
+ - exported_realm.json | community.general.json_query('authenticatorConfig[?alias==`my-conditional-otp-config`]') | length == 1
- name: Delete auth flow
community.general.keycloak_authentication:
@@ -182,4 +182,4 @@
- name: Remove container
community.docker.docker_container:
name: mykeycloak
- state: absent
\ No newline at end of file
+ state: absent
diff --git a/tests/integration/targets/keycloak_authz_custom_policy/tasks/main.yml b/tests/integration/targets/keycloak_authz_custom_policy/tasks/main.yml
index b22d751215..39500fc86d 100644
--- a/tests/integration/targets/keycloak_authz_custom_policy/tasks/main.yml
+++ b/tests/integration/targets/keycloak_authz_custom_policy/tasks/main.yml
@@ -93,7 +93,7 @@
- result.end_state.type == "script-policy-2.js"
- result.msg == 'Custom policy FirstCustomPolicy already exists'
-# Ensure that we can create multiple instances of the custom policy
+# Ensure that we can create multiple instances of the custom policy
- name: Create second instance of the custom policy
community.general.keycloak_authz_custom_policy:
auth_keycloak_url: "{{ url }}"
diff --git a/tests/integration/targets/keycloak_authz_permission/tasks/main.yml b/tests/integration/targets/keycloak_authz_permission/tasks/main.yml
index 16cb6806f2..889e59c506 100644
--- a/tests/integration/targets/keycloak_authz_permission/tasks/main.yml
+++ b/tests/integration/targets/keycloak_authz_permission/tasks/main.yml
@@ -93,8 +93,8 @@
name: "ScopePermission"
description: "Scope permission"
resources:
- - "Default Resource"
- - "Other Resource"
+ - "Default Resource"
+ - "Other Resource"
permission_type: scope
scopes:
- "file:delete"
diff --git a/tests/integration/targets/keycloak_client/tasks/main.yml b/tests/integration/targets/keycloak_client/tasks/main.yml
index e1a7d2ebfb..e22544ddd4 100644
--- a/tests/integration/targets/keycloak_client/tasks/main.yml
+++ b/tests/integration/targets/keycloak_client/tasks/main.yml
@@ -6,7 +6,7 @@
uri:
url: "{{ url }}/admin/"
status_code: 200
- validate_certs: no
+ validate_certs: false
register: result
until: result.status == 200
retries: 10
@@ -72,7 +72,7 @@
redirect_uris: '{{redirect_uris1}}'
attributes: '{{client_attributes1}}'
protocol_mappers: '{{protocol_mappers1}}'
- authorization_services_enabled: False
+ authorization_services_enabled: false
check_mode: true
register: check_client_when_present_and_same
@@ -94,8 +94,8 @@
redirect_uris: '{{redirect_uris1}}'
attributes: '{{client_attributes1}}'
protocol_mappers: '{{protocol_mappers1}}'
- authorization_services_enabled: False
- service_accounts_enabled: True
+ authorization_services_enabled: false
+ service_accounts_enabled: true
check_mode: true
register: check_client_when_present_and_changed
diff --git a/tests/integration/targets/keycloak_client_rolescope/tasks/main.yml b/tests/integration/targets/keycloak_client_rolescope/tasks/main.yml
index 8675c9548d..d4c60d3f2e 100644
--- a/tests/integration/targets/keycloak_client_rolescope/tasks/main.yml
+++ b/tests/integration/targets/keycloak_client_rolescope/tasks/main.yml
@@ -6,7 +6,7 @@
uri:
url: "{{ url }}/admin/"
status_code: 200
- validate_certs: no
+ validate_certs: false
register: result
until: result.status == 200
retries: 10
@@ -39,9 +39,9 @@
auth_password: "{{ admin_password }}"
name: "{{ item }}"
realm: "{{ realm }}"
- with_items:
- - "{{ realm_role_admin }}"
- - "{{ realm_role_user }}"
+ with_items:
+ - "{{ realm_role_admin }}"
+ - "{{ realm_role_user }}"
- name: Client private
community.general.keycloak_client:
@@ -53,10 +53,10 @@
client_id: "{{ client_name_private }}"
state: present
redirect_uris:
- - "https://my-backend-api.c.org/"
- fullScopeAllowed: True
+ - "https://my-backend-api.c.org/"
+ fullScopeAllowed: true
attributes: '{{client_attributes1}}'
- public_client: False
+ public_client: false
- name: Create a Keycloak client role
community.general.keycloak_role:
@@ -67,9 +67,9 @@
name: "{{ item }}"
realm: "{{ realm }}"
client_id: "{{ client_name_private }}"
- with_items:
- - "{{ client_role_admin }}"
- - "{{ client_role_user }}"
+ with_items:
+ - "{{ client_role_admin }}"
+ - "{{ client_role_user }}"
- name: Client public
community.general.keycloak_client:
@@ -80,10 +80,10 @@
realm: "{{ realm }}"
client_id: "{{ client_name_public }}"
redirect_uris:
- - "https://my-onepage-app-frontend.c.org/"
+ - "https://my-onepage-app-frontend.c.org/"
attributes: '{{client_attributes1}}'
- full_scope_allowed: False
- public_client: True
+ full_scope_allowed: false
+ public_client: true
- name: Map roles to public client
@@ -96,15 +96,15 @@
client_id: "{{ client_name_public }}"
client_scope_id: "{{ client_name_private }}"
role_names:
- - "{{ client_role_admin }}"
- - "{{ client_role_user }}"
+ - "{{ client_role_admin }}"
+ - "{{ client_role_user }}"
register: result
- name: Assert mapping created
assert:
that:
- - result is changed
- - result.end_state | length == 2
+ - result is changed
+ - result.end_state | length == 2
- name: remap role user to public client
community.general.keycloak_client_rolescope:
@@ -116,15 +116,15 @@
client_id: "{{ client_name_public }}"
client_scope_id: "{{ client_name_private }}"
role_names:
- - "{{ client_role_user }}"
+ - "{{ client_role_user }}"
register: result
- name: Assert mapping created
assert:
that:
- - result is not changed
- - result.end_state | length == 2
-
+ - result is not changed
+ - result.end_state | length == 2
+
- name: Remove Map role admin to public client
community.general.keycloak_client_rolescope:
auth_keycloak_url: "{{ url }}"
@@ -135,16 +135,16 @@
client_id: "{{ client_name_public }}"
client_scope_id: "{{ client_name_private }}"
role_names:
- - "{{ client_role_admin }}"
+ - "{{ client_role_admin }}"
state: absent
register: result
- name: Assert mapping deleted
assert:
that:
- - result is changed
- - result.end_state | length == 1
- - result.end_state[0].name == client_role_user
+ - result is changed
+ - result.end_state | length == 1
+ - result.end_state[0].name == client_role_user
- name: Map missing roles to public client
community.general.keycloak_client_rolescope:
@@ -156,15 +156,15 @@
client_id: "{{ client_name_public }}"
client_scope_id: "{{ client_name_private }}"
role_names:
- - "{{ client_role_admin }}"
- - "{{ client_role_not_exists }}"
+ - "{{ client_role_admin }}"
+ - "{{ client_role_not_exists }}"
ignore_errors: true
register: result
- name: Assert failed mapping missing role
assert:
that:
- - result is failed
+ - result is failed
- name: Map roles duplicate
community.general.keycloak_client_rolescope:
@@ -176,15 +176,15 @@
client_id: "{{ client_name_public }}"
client_scope_id: "{{ client_name_private }}"
role_names:
- - "{{ client_role_admin }}"
- - "{{ client_role_admin }}"
+ - "{{ client_role_admin }}"
+ - "{{ client_role_admin }}"
register: result
- name: Assert result
assert:
that:
- - result is changed
- - result.end_state | length == 2
+ - result is changed
+ - result.end_state | length == 2
- name: Map roles to private client
community.general.keycloak_client_rolescope:
@@ -195,14 +195,14 @@
realm: "{{ realm }}"
client_id: "{{ client_name_private }}"
role_names:
- - "{{ realm_role_admin }}"
+ - "{{ realm_role_admin }}"
ignore_errors: true
register: result
- name: Assert failed mapping role to full scope client
assert:
that:
- - result is failed
+ - result is failed
- name: Map realm role to public client
community.general.keycloak_client_rolescope:
@@ -213,14 +213,14 @@
realm: "{{ realm }}"
client_id: "{{ client_name_public }}"
role_names:
- - "{{ realm_role_admin }}"
+ - "{{ realm_role_admin }}"
register: result
- name: Assert result
assert:
that:
- - result is changed
- - result.end_state | length == 1
+ - result is changed
+ - result.end_state | length == 1
- name: Map two realm roles to public client
community.general.keycloak_client_rolescope:
@@ -231,15 +231,15 @@
realm: "{{ realm }}"
client_id: "{{ client_name_public }}"
role_names:
- - "{{ realm_role_admin }}"
- - "{{ realm_role_user }}"
+ - "{{ realm_role_admin }}"
+ - "{{ realm_role_user }}"
register: result
- name: Assert result
assert:
that:
- - result is changed
- - result.end_state | length == 2
+ - result is changed
+ - result.end_state | length == 2
- name: Unmap all realm roles to public client
community.general.keycloak_client_rolescope:
@@ -250,16 +250,16 @@
realm: "{{ realm }}"
client_id: "{{ client_name_public }}"
role_names:
- - "{{ realm_role_admin }}"
- - "{{ realm_role_user }}"
+ - "{{ realm_role_admin }}"
+ - "{{ realm_role_user }}"
state: absent
register: result
- name: Assert result
assert:
that:
- - result is changed
- - result.end_state | length == 0
+ - result is changed
+ - result.end_state | length == 0
- name: Map missing realm role to public client
community.general.keycloak_client_rolescope:
@@ -270,14 +270,14 @@
realm: "{{ realm }}"
client_id: "{{ client_name_public }}"
role_names:
- - "{{ realm_role_not_exists }}"
+ - "{{ realm_role_not_exists }}"
ignore_errors: true
register: result
- name: Assert failed mapping missing realm role
assert:
that:
- - result is failed
+ - result is failed
- name: Check-mode try to Map realm roles to public client
community.general.keycloak_client_rolescope:
@@ -288,17 +288,17 @@
realm: "{{ realm }}"
client_id: "{{ client_name_public }}"
role_names:
- - "{{ realm_role_admin }}"
- - "{{ realm_role_user }}"
+ - "{{ realm_role_admin }}"
+ - "{{ realm_role_user }}"
check_mode: true
register: result
- name: Assert result
assert:
that:
- - result is changed
- - result.end_state | length == 2
-
+ - result is changed
+ - result.end_state | length == 2
+
- name: Check-mode step two, check if change where applied
community.general.keycloak_client_rolescope:
auth_keycloak_url: "{{ url }}"
@@ -313,5 +313,5 @@
- name: Assert result
assert:
that:
- - result is not changed
- - result.end_state | length == 0
\ No newline at end of file
+ - result is not changed
+ - result.end_state | length == 0
diff --git a/tests/integration/targets/keycloak_component_info/tasks/main.yml b/tests/integration/targets/keycloak_component_info/tasks/main.yml
index c0ca5600fc..e84a1f751c 100644
--- a/tests/integration/targets/keycloak_component_info/tasks/main.yml
+++ b/tests/integration/targets/keycloak_component_info/tasks/main.yml
@@ -6,7 +6,7 @@
uri:
url: "{{ url }}/admin/"
status_code: 200
- validate_certs: no
+ validate_certs: false
register: result
until: result.status == 200
retries: 10
@@ -45,8 +45,8 @@
- name: Assert ldap is missing
assert:
that:
- - result is not changed
- - result.components | length == 0
+ - result is not changed
+ - result.components | length == 0
- name: Create new user federation
community.general.keycloak_user_federation:
@@ -103,15 +103,15 @@
- name: Assert ldap exists
assert:
that:
- - result is not changed
- - result.components | length == 1
- - result.components[0].name == federation
+ - result is not changed
+ - result.components | length == 1
+ - result.components[0].name == federation
- name: Save ldap id
set_fact:
myLdapId: "{{ result.components[0].id }}"
-- name: Retrive ldap subcomponents info
+- name: Retrive ldap subcomponents info
community.general.keycloak_component_info:
auth_keycloak_url: "{{ url }}"
auth_realm: "{{ admin_realm }}"
@@ -124,10 +124,10 @@
- name: Assert components exists
assert:
that:
- - result is not changed
- - result.components | length > 0
+ - result is not changed
+ - result.components | length > 0
-- name: Retrive ldap subcomponents filter by name
+- name: Retrive ldap subcomponents filter by name
community.general.keycloak_component_info:
auth_keycloak_url: "{{ url }}"
auth_realm: "{{ admin_realm }}"
@@ -141,11 +141,11 @@
- name: Assert sub component with name "email" exists
assert:
that:
- - result is not changed
- - result.components | length == 1
- - result.components[0].name == "email"
+ - result is not changed
+ - result.components | length == 1
+ - result.components[0].name == "email"
-- name: Retrive ldap subcomponents filter by type
+- name: Retrive ldap subcomponents filter by type
community.general.keycloak_component_info:
auth_keycloak_url: "{{ url }}"
auth_realm: "{{ admin_realm }}"
@@ -159,9 +159,9 @@
- name: Assert ldap sub components filter by type
assert:
that:
- - result is not changed
- - result.components | length > 0
- - result.components[0].providerType == "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+ - result is not changed
+ - result.components | length > 0
+ - result.components[0].providerType == "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
- name: Retrive key info when absent
community.general.keycloak_component_info:
@@ -177,8 +177,8 @@
- name: Assert key is missing
assert:
that:
- - result is not changed
- - result.components | length == 0
+ - result is not changed
+ - result.components | length == 0
- name: Create custom realm key
community.general.keycloak_realm_key:
@@ -211,8 +211,8 @@
- name: Assert key exists
assert:
that:
- - result is not changed
- - result.components | length == 1
+ - result is not changed
+ - result.components | length == 1
- name: Retrive all realm components
community.general.keycloak_component_info:
@@ -226,8 +226,8 @@
- name: Assert key exists
assert:
that:
- - result is not changed
- - result.components | length > 0
+ - result is not changed
+ - result.components | length > 0
- name: Retrive all ldap in realm
community.general.keycloak_component_info:
@@ -242,10 +242,10 @@
- name: Assert key exists
assert:
that:
- - result is not changed
- - result.components | length == 1
- - result.components[0].providerType == "org.keycloak.storage.UserStorageProvider"
- - result.components[0].name == "myldap"
+ - result is not changed
+ - result.components | length == 1
+ - result.components[0].providerType == "org.keycloak.storage.UserStorageProvider"
+ - result.components[0].name == "myldap"
- name: Retrive component by name only
community.general.keycloak_component_info:
@@ -260,7 +260,7 @@
- name: Assert key exists
assert:
that:
- - result is not changed
- - result.components | length == 1
- - result.components[0].providerType == "org.keycloak.keys.KeyProvider"
- - result.components[0].name == realm_key_name
+ - result is not changed
+ - result.components | length == 1
+ - result.components[0].providerType == "org.keycloak.keys.KeyProvider"
+ - result.components[0].name == realm_key_name
diff --git a/tests/integration/targets/keycloak_group/tasks/main.yml b/tests/integration/targets/keycloak_group/tasks/main.yml
index f807b0640d..df567d7db7 100644
--- a/tests/integration/targets/keycloak_group/tasks/main.yml
+++ b/tests/integration/targets/keycloak_group/tasks/main.yml
@@ -145,11 +145,11 @@
realm: "{{ realm }}"
name: my-new_group
attributes:
- attrib1: value1
- attrib2: value2
- attrib3:
- - item1
- - item2
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - item1
+ - item2
register: result
- name: Assert that group was correctly created
diff --git a/tests/integration/targets/keycloak_modules_authentication/tasks/main.yml b/tests/integration/targets/keycloak_modules_authentication/tasks/main.yml
index 1553e29c1c..b788865de9 100644
--- a/tests/integration/targets/keycloak_modules_authentication/tasks/main.yml
+++ b/tests/integration/targets/keycloak_modules_authentication/tasks/main.yml
@@ -3,6 +3,19 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
+- name: Reset public login in master admin-cli (if potentially previous test failed)
+ community.general.keycloak_client:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ auth_client_id: "admin-cli"
+ auth_client_secret: "{{ client_secret }}"
+ client_id: "admin-cli"
+ secret: "{{ client_secret }}"
+ public_client: true
+ state: present
+
- name: Create realm
community.general.keycloak_realm:
auth_keycloak_url: "{{ url }}"
@@ -201,6 +214,89 @@
debug:
var: result
+- name: PREPARE - Temporarily disable public login in master admin-cli
+ community.general.keycloak_client:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ auth_client_id: "admin-cli"
+ auth_client_secret: "{{ client_secret }}"
+ client_id: "admin-cli"
+ secret: "{{ client_secret }}"
+ public_client: false
+ service_accounts_enabled: true
+ client_authenticator_type: "client-secret"
+ state: present
+
+- name: PREPARE - Get admin role id
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ auth_client_id: "admin-cli"
+ auth_client_secret: "{{ client_secret }}"
+ name: "admin"
+ register: admin_role
+
+- name: PREPARE - Assign admin role to admin-cli in master
+ community.general.keycloak_user_rolemapping:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ auth_client_id: "admin-cli"
+ auth_client_secret: "{{ client_secret }}"
+ realm: "master"
+ roles:
+ - name: "admin"
+ service_account_user_client_id: "admin-cli"
+
+- name: Create new realm role with valid client_id and client_secret
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_client_id: "admin-cli"
+ auth_client_secret: "{{ client_secret }}"
+ realm: "{{ realm }}"
+ name: "{{ role }}"
+ description: "{{ keycloak_role_description }}"
+ state: present
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Reset temporarily disabled public login in master admin-cli
+ community.general.keycloak_client:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ auth_client_id: "admin-cli"
+ auth_client_secret: "{{ client_secret }}"
+ client_id: "admin-cli"
+ secret: "{{ client_secret }}"
+ public_client: true
+ state: present
+
+- name: Remove created realm role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ role }}"
+ state: absent
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
### Unhappy path tests
- name: Fail to create new realm role with invalid username/password
@@ -215,7 +311,6 @@
state: present
register: result
failed_when: >
- (result.exception is not defined) or
("HTTP Error 401: Unauthorized" not in result.msg)
- name: Fail to create new realm role with invalid auth token
@@ -228,7 +323,6 @@
state: present
register: result
failed_when: >
- (result.exception is not defined) or
("HTTP Error 401: Unauthorized" not in result.msg)
- name: Fail to create new realm role with invalid auth and refresh tokens, and invalid username/password
@@ -245,5 +339,4 @@
state: present
register: result
failed_when: >
- (result.exception is not defined) or
("HTTP Error 401: Unauthorized" not in result.msg)
diff --git a/tests/integration/targets/keycloak_modules_authentication/vars/main.yml b/tests/integration/targets/keycloak_modules_authentication/vars/main.yml
index 02ad618e1b..f57d791d86 100644
--- a/tests/integration/targets/keycloak_modules_authentication/vars/main.yml
+++ b/tests/integration/targets/keycloak_modules_authentication/vars/main.yml
@@ -9,6 +9,7 @@ admin_user: admin
admin_password: password
realm: myrealm
client_id: myclient
+client_secret: myclientsecret
role: myrole
keycloak_role_name: test
diff --git a/tests/integration/targets/keycloak_role/vars/main.yml b/tests/integration/targets/keycloak_role/vars/main.yml
index 0af55dfc5c..1da126873e 100644
--- a/tests/integration/targets/keycloak_role/vars/main.yml
+++ b/tests/integration/targets/keycloak_role/vars/main.yml
@@ -17,10 +17,10 @@ keycloak_role_name: test
keycloak_role_description: test
keycloak_role_composite: true
keycloak_role_composites:
- - name: view-clients
+ - name: view-clients
client_id: "realm-management"
state: present
- - name: query-clients
+ - name: query-clients
client_id: "realm-management"
state: present
- name: offline_access
@@ -31,10 +31,10 @@ keycloak_client_description: This is a client for testing purpose
role_state: present
keycloak_role_composites_with_absent:
- - name: view-clients
+ - name: view-clients
client_id: "realm-management"
state: present
- - name: query-clients
+ - name: query-clients
client_id: "realm-management"
state: present
- name: offline_access
diff --git a/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml b/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml
index 464fab5786..ff56371e58 100644
--- a/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml
+++ b/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml
@@ -37,7 +37,8 @@
- name: Map a realm role to client service account
vars:
- - roles: [ {'name': '{{ role }}'} ]
+ - roles:
+ - name: '{{ role }}'
community.general.keycloak_user_rolemapping:
auth_keycloak_url: "{{ url }}"
auth_realm: "{{ admin_realm }}"
@@ -57,7 +58,8 @@
- name: Unmap a realm role from client service account
vars:
- - roles: [ {'name': '{{ role }}'} ]
+ - roles:
+ - name: '{{ role }}'
community.general.keycloak_user_rolemapping:
auth_keycloak_url: "{{ url }}"
auth_realm: "{{ admin_realm }}"
@@ -101,7 +103,8 @@
- name: Map a client role to client service account
vars:
- - roles: [ {'name': '{{ role }}'} ]
+ - roles:
+ - name: '{{ role }}'
community.general.keycloak_user_rolemapping:
auth_keycloak_url: "{{ url }}"
auth_realm: "{{ admin_realm }}"
@@ -122,7 +125,8 @@
- name: Unmap a client role from client service account
vars:
- - roles: [ {'name': '{{ role }}'} ]
+ - roles:
+ - name: '{{ role }}'
community.general.keycloak_user_rolemapping:
auth_keycloak_url: "{{ url }}"
auth_realm: "{{ admin_realm }}"
diff --git a/tests/integration/targets/keycloak_userprofile/tasks/main.yml b/tests/integration/targets/keycloak_userprofile/tasks/main.yml
index 37b65d35ed..8ecce1a728 100644
--- a/tests/integration/targets/keycloak_userprofile/tasks/main.yml
+++ b/tests/integration/targets/keycloak_userprofile/tasks/main.yml
@@ -180,7 +180,7 @@
# parent_id: "{{ realm }}"
# config: "{{ config_updated }}"
# register: result
-#
+#
# - name: Assert that forced update ran correctly
# assert:
# that:
@@ -292,7 +292,7 @@
- name: Remove Keycloak test realm
community.general.keycloak_realm:
- auth_keycloak_url: "{{ url }}"
+ auth_keycloak_url: "{{ url }}"
auth_realm: "{{ admin_realm }}"
auth_username: "{{ admin_user }}"
auth_password: "{{ admin_password }}"
diff --git a/tests/integration/targets/keycloak_userprofile/vars/main.yml b/tests/integration/targets/keycloak_userprofile/vars/main.yml
index 1f8ae6c823..b423a677c0 100644
--- a/tests/integration/targets/keycloak_userprofile/vars/main.yml
+++ b/tests/integration/targets/keycloak_userprofile/vars/main.yml
@@ -9,69 +9,69 @@ admin_user: admin
admin_password: password
realm: realm_userprofile_test
attributes_default:
- - name: username
- displayName: ${username}
- validations:
- length:
- min: 3
- max: 255
- usernameProhibitedCharacters: {}
- up_username_not_idn_homograph: {}
- annotations: {}
- permissions:
- view:
- - admin
- - user
- edit: []
- multivalued: false
- - name: email
- displayName: ${email}
- validations:
- email: {}
- length:
- max: 255
- annotations: {}
- required:
- roles:
- - user
- permissions:
- view:
- - admin
- - user
- edit: []
- multivalued: false
- - name: firstName
- displayName: ${firstName}
- validations:
- length:
- max: 255
- personNameProhibitedCharacters: {}
- annotations: {}
- required:
- roles:
- - user
- permissions:
- view:
- - admin
- - user
- edit: []
- multivalued: false
- - name: lastName
- displayName: ${lastName}
- validations:
- length:
- max: 255
- person_name_prohibited_characters: {}
- annotations: {}
- required:
- roles:
- - user
- permissions:
- view:
- - admin
- - user
- edit: []
- multivalued: false
+ - name: username
+ displayName: ${username}
+ validations:
+ length:
+ min: 3
+ max: 255
+ usernameProhibitedCharacters: {}
+ up_username_not_idn_homograph: {}
+ annotations: {}
+ permissions:
+ view:
+ - admin
+ - user
+ edit: []
+ multivalued: false
+ - name: email
+ displayName: ${email}
+ validations:
+ email: {}
+ length:
+ max: 255
+ annotations: {}
+ required:
+ roles:
+ - user
+ permissions:
+ view:
+ - admin
+ - user
+ edit: []
+ multivalued: false
+ - name: firstName
+ displayName: ${firstName}
+ validations:
+ length:
+ max: 255
+ personNameProhibitedCharacters: {}
+ annotations: {}
+ required:
+ roles:
+ - user
+ permissions:
+ view:
+ - admin
+ - user
+ edit: []
+ multivalued: false
+ - name: lastName
+ displayName: ${lastName}
+ validations:
+ length:
+ max: 255
+ person_name_prohibited_characters: {}
+ annotations: {}
+ required:
+ roles:
+ - user
+ permissions:
+ view:
+ - admin
+ - user
+ edit: []
+ multivalued: false
attributes_additional:
- name: additionalAttribute
displayName: additionalAttribute
diff --git a/tests/integration/targets/keyring/tasks/main.yml b/tests/integration/targets/keyring/tasks/main.yml
index 3833018e80..2aa05fc0a6 100644
--- a/tests/integration/targets/keyring/tasks/main.yml
+++ b/tests/integration/targets/keyring/tasks/main.yml
@@ -5,7 +5,7 @@
- name: Ensure required packages for headless keyring access are installed (RPM)
ansible.builtin.package:
- name: gnome-keyring
+ name: gnome-keyring
become: true
when: "'localhost' not in inventory_hostname"
diff --git a/tests/integration/targets/ldap_inc/tasks/tests/basic.yml b/tests/integration/targets/ldap_inc/tasks/tests/basic.yml
index 4165ece743..5eb1a535ff 100644
--- a/tests/integration/targets/ldap_inc/tasks/tests/basic.yml
+++ b/tests/integration/targets/ldap_inc/tasks/tests/basic.yml
@@ -21,10 +21,10 @@
- name: assert that test increment by default
assert:
that:
- - output is not failed
- - output.incremented
- - output.value == "1001"
- - output.rfc4525
+ - output is not failed
+ - output.incremented
+ - output.value == "1001"
+ - output.rfc4525
- name: Test defined increment
ldap_inc:
@@ -39,10 +39,10 @@
- name: assert that test increment by default
assert:
that:
- - output is not failed
- - output.incremented
- - output.value == "1003"
- - output.rfc4525
+ - output is not failed
+ - output.incremented
+ - output.value == "1003"
+ - output.rfc4525
- name: Test defined increment by 0
ldap_inc:
@@ -57,9 +57,9 @@
- name: assert that test defined increment by 0
assert:
that:
- - output is not failed
- - output.incremented == false
- - output.value == "1003"
+ - output is not failed
+ - output.incremented == false
+ - output.value == "1003"
- name: Test defined negative increment
ldap_inc:
@@ -74,10 +74,10 @@
- name: assert that test defined negative increment
assert:
that:
- - output is not failed
- - output.incremented
- - output.value == "1002"
- - output.rfc4525
+ - output is not failed
+ - output.incremented
+ - output.value == "1002"
+ - output.rfc4525
- name: Test forcing classic method instead of automatic detection
ldap_inc:
@@ -93,7 +93,7 @@
- name: assert that test defined negative increment
assert:
that:
- - output is not failed
- - output.incremented
- - output.value == "1001"
- - output.rfc4525 == False
+ - output is not failed
+ - output.incremented
+ - output.value == "1001"
+ - output.rfc4525 == False
diff --git a/tests/integration/targets/ldap_search/tasks/tests/auth.yml b/tests/integration/targets/ldap_search/tasks/tests/auth.yml
index a8c7a13ee9..912178c364 100644
--- a/tests/integration/targets/ldap_search/tasks/tests/auth.yml
+++ b/tests/integration/targets/ldap_search/tasks/tests/auth.yml
@@ -22,9 +22,9 @@
- name: assert that test LDAP user can read its password
assert:
that:
- - output is not failed
- - output.results | length == 1
- - output.results.0.userPassword is defined
+ - output is not failed
+ - output.results | length == 1
+ - output.results.0.userPassword is defined
- name: Test simple search for cert authenticated user
ldap_search:
@@ -42,6 +42,6 @@
- name: assert that test LDAP user can read its password
assert:
that:
- - output is not failed
- - output.results | length == 1
- - output.results.0.userPassword is defined
+ - output is not failed
+ - output.results | length == 1
+ - output.results.0.userPassword is defined
diff --git a/tests/integration/targets/ldap_search/tasks/tests/basic.yml b/tests/integration/targets/ldap_search/tasks/tests/basic.yml
index 11e5d6562c..5b98c61648 100644
--- a/tests/integration/targets/ldap_search/tasks/tests/basic.yml
+++ b/tests/integration/targets/ldap_search/tasks/tests/basic.yml
@@ -20,9 +20,9 @@
- name: assert that test LDAP user can be found
assert:
that:
- - output is not failed
- - output.results | length == 1
- - output.results.0.displayName == "LDAP Test"
+ - output is not failed
+ - output.results | length == 1
+ - output.results.0.displayName == "LDAP Test"
- name: Test simple search for a user with no results
ldap_search:
@@ -35,5 +35,5 @@
- name: assert that the output is empty
assert:
that:
- - output is not failed
- - output.results | length == 0
+ - output is not failed
+ - output.results | length == 0
diff --git a/tests/integration/targets/ldap_search/tasks/tests/pages.yml b/tests/integration/targets/ldap_search/tasks/tests/pages.yml
index 32575854ba..e0742c5598 100644
--- a/tests/integration/targets/ldap_search/tasks/tests/pages.yml
+++ b/tests/integration/targets/ldap_search/tasks/tests/pages.yml
@@ -20,5 +20,5 @@
- name: assert that the right number of results are returned
assert:
that:
- - output is not failed
- - output.results | length == 2
+ - output is not failed
+ - output.results | length == 2
diff --git a/tests/integration/targets/ldap_search/tasks/tests/schema.yml b/tests/integration/targets/ldap_search/tasks/tests/schema.yml
index 892eac3cb3..ca26305b82 100644
--- a/tests/integration/targets/ldap_search/tasks/tests/schema.yml
+++ b/tests/integration/targets/ldap_search/tasks/tests/schema.yml
@@ -20,6 +20,7 @@
- name: Assert that the schema output is correct
assert:
that:
- - output is not failed
- - output.results | length >= 1
- - "{{ 'displayName' in output.results.0.attrs }}"
+ - output is not failed
+ - output.results | length >= 1
+ - >-
+ 'displayName' in output.results.0.attrs
diff --git a/tests/integration/targets/listen_ports_facts/tasks/main.yml b/tests/integration/targets/listen_ports_facts/tasks/main.yml
index 0e583e7a13..5da5b03784 100644
--- a/tests/integration/targets/listen_ports_facts/tasks/main.yml
+++ b/tests/integration/targets/listen_ports_facts/tasks/main.yml
@@ -110,3 +110,32 @@
loop: "{{ [tcp_listen, udp_listen]|flatten }}"
when: item.name == 'nc'
ignore_errors: true
+
+
+- when: ansible_os_family == "Debian"
+ block:
+ - name: Remove netstat and ss dependencies to simulate missing executables
+ ansible.builtin.package:
+ name:
+ - net-tools
+ - iproute2
+ state: absent
+ ignore_errors: true
+
+ - name: Trigger listen_ports_facts with missing tools
+ community.general.listen_ports_facts:
+ register: listen_ports_failure_result
+ ignore_errors: true
+
+ - name: Assert graceful failure when dependencies are missing
+ ansible.builtin.assert:
+ that:
+ - listen_ports_failure_result is failed
+ - "'Unable to find any of the supported commands' in listen_ports_failure_result.msg"
+
+ - name: Reinstall netstat and ss dependencies after test
+ ansible.builtin.package:
+ name:
+ - net-tools
+ - iproute2
+ state: present
diff --git a/tests/integration/targets/lookup_cartesian/tasks/main.yml b/tests/integration/targets/lookup_cartesian/tasks/main.yml
index 5575f22ba6..3351537ffc 100644
--- a/tests/integration/targets/lookup_cartesian/tasks/main.yml
+++ b/tests/integration/targets/lookup_cartesian/tasks/main.yml
@@ -12,21 +12,21 @@
debug: var=item
register: product
with_community.general.cartesian:
- - - A
- - B
- - C
- - - '1'
- - '2'
- - '3'
+ - - A
+ - B
+ - C
+ - - '1'
+ - '2'
+ - '3'
- name: Verify cartesian lookup
assert:
that:
- - product.results[0]['item'] == ["A", "1"]
- - product.results[1]['item'] == ["A", "2"]
- - product.results[2]['item'] == ["A", "3"]
- - product.results[3]['item'] == ["B", "1"]
- - product.results[4]['item'] == ["B", "2"]
- - product.results[5]['item'] == ["B", "3"]
- - product.results[6]['item'] == ["C", "1"]
- - product.results[7]['item'] == ["C", "2"]
- - product.results[8]['item'] == ["C", "3"]
+ - product.results[0]['item'] == ["A", "1"]
+ - product.results[1]['item'] == ["A", "2"]
+ - product.results[2]['item'] == ["A", "3"]
+ - product.results[3]['item'] == ["B", "1"]
+ - product.results[4]['item'] == ["B", "2"]
+ - product.results[5]['item'] == ["B", "3"]
+ - product.results[6]['item'] == ["C", "1"]
+ - product.results[7]['item'] == ["C", "2"]
+ - product.results[8]['item'] == ["C", "3"]
diff --git a/tests/integration/targets/lookup_etcd3/defaults/main.yml b/tests/integration/targets/lookup_etcd3/defaults/main.yml
index de726382ba..68751f3d64 100644
--- a/tests/integration/targets/lookup_etcd3/defaults/main.yml
+++ b/tests/integration/targets/lookup_etcd3/defaults/main.yml
@@ -3,5 +3,5 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- etcd3_prefix: '/keyprefix/'
- etcd3_singlekey: '/singlekeypath'
+etcd3_prefix: '/keyprefix/'
+etcd3_singlekey: '/singlekeypath'
diff --git a/tests/integration/targets/lookup_etcd3/tasks/main.yml b/tests/integration/targets/lookup_etcd3/tasks/main.yml
index 47f1916c02..2e150452b2 100644
--- a/tests/integration/targets/lookup_etcd3/tasks/main.yml
+++ b/tests/integration/targets/lookup_etcd3/tasks/main.yml
@@ -15,9 +15,9 @@
value: "bar{{ item }}"
state: present
loop:
- - 1
- - 2
- - 3
+ - 1
+ - 2
+ - 3
- name: put a single key/values in etcd
etcd3:
diff --git a/tests/integration/targets/lookup_etcd3/tasks/tests.yml b/tests/integration/targets/lookup_etcd3/tasks/tests.yml
index 929c6f142a..132d2ce9ac 100644
--- a/tests/integration/targets/lookup_etcd3/tasks/tests.yml
+++ b/tests/integration/targets/lookup_etcd3/tasks/tests.yml
@@ -5,23 +5,23 @@
# SPDX-License-Identifier: GPL-3.0-or-later
- block:
- - name: 'Fetch secrets using "etcd3" lookup'
- set_fact:
- etcdoutkey1: "{{ lookup('community.general.etcd3', etcd3_prefix, prefix=True) }}"
- etcdoutkey2: "{{ lookup('community.general.etcd3', etcd3_singlekey) }}"
- key_inexistent: "{{ lookup('community.general.etcd3', 'inexistent_key') }}"
+ - name: 'Fetch secrets using "etcd3" lookup'
+ set_fact:
+ etcdoutkey1: "{{ lookup('community.general.etcd3', etcd3_prefix, prefix=True) }}"
+ etcdoutkey2: "{{ lookup('community.general.etcd3', etcd3_singlekey) }}"
+ key_inexistent: "{{ lookup('community.general.etcd3', 'inexistent_key') }}"
- - name: 'Check etcd values'
- assert:
- msg: 'unexpected etcd3 values'
- that:
- - etcdoutkey1 is sequence
- - etcdoutkey1 | length() == 3
- - etcdoutkey1[0].value == 'bar1'
- - etcdoutkey1[1].value == 'bar2'
- - etcdoutkey1[2].value == 'bar3'
- - etcdoutkey2 is sequence
- - etcdoutkey2 | length() == 2
- - etcdoutkey2.value == 'foobar'
- - key_inexistent is sequence
- - key_inexistent | length() == 0
+ - name: 'Check etcd values'
+ assert:
+ msg: 'unexpected etcd3 values'
+ that:
+ - etcdoutkey1 is sequence
+ - etcdoutkey1 | length() == 3
+ - etcdoutkey1[0].value == 'bar1'
+ - etcdoutkey1[1].value == 'bar2'
+ - etcdoutkey1[2].value == 'bar3'
+ - etcdoutkey2 is sequence
+ - etcdoutkey2 | length() == 2
+ - etcdoutkey2.value == 'foobar'
+ - key_inexistent is sequence
+ - key_inexistent | length() == 0
diff --git a/tests/integration/targets/lookup_flattened/tasks/main.yml b/tests/integration/targets/lookup_flattened/tasks/main.yml
index 37af1327bf..b4d57eb176 100644
--- a/tests/integration/targets/lookup_flattened/tasks/main.yml
+++ b/tests/integration/targets/lookup_flattened/tasks/main.yml
@@ -11,14 +11,14 @@
- name: test with_flattened
set_fact: '{{ item }}=flattened'
with_community.general.flattened:
- - - a__
- - - b__
- - - c__
- - d__
+ - - a__
+ - - b__
+ - - c__
+ - d__
- name: verify with_flattened results
assert:
that:
- - a__ == 'flattened'
- - b__ == 'flattened'
- - c__ == 'flattened'
- - d__ == 'flattened'
+ - a__ == 'flattened'
+ - b__ == 'flattened'
+ - c__ == 'flattened'
+ - d__ == 'flattened'
diff --git a/tests/integration/targets/lookup_lmdb_kv/aliases b/tests/integration/targets/lookup_lmdb_kv/aliases
index 9c7febe241..5ecc8bfd38 100644
--- a/tests/integration/targets/lookup_lmdb_kv/aliases
+++ b/tests/integration/targets/lookup_lmdb_kv/aliases
@@ -5,3 +5,4 @@
azp/posix/2
destructive
skip/aix
+disabled # TODO: currently broken
diff --git a/tests/integration/targets/lookup_lmdb_kv/test.yml b/tests/integration/targets/lookup_lmdb_kv/test.yml
index 8a88bca456..4d2ea0d973 100644
--- a/tests/integration/targets/lookup_lmdb_kv/test.yml
+++ b/tests/integration/targets/lookup_lmdb_kv/test.yml
@@ -5,27 +5,27 @@
- hosts: localhost
tasks:
- - debug:
- msg: '{{ query("community.general.lmdb_kv", "nl", "be", "lu", db="jp.mdb") }}'
- - debug:
- var: item.1
- loop: '{{ query("community.general.lmdb_kv", db="jp.mdb") }}'
- - assert:
- that:
- - query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') == ['Netherlands', 'Belgium', 'Luxembourg']
- - query('community.general.lmdb_kv', db='jp.mdb')|length == 5
- - assert:
- that:
- - item.0 == 'nl'
- - item.1 == 'Netherlands'
- vars:
- lmdb_kv_db: jp.mdb
- with_community.general.lmdb_kv:
- - n*
- - assert:
- that:
- - item == 'Belgium'
- vars:
- lmdb_kv_db: jp.mdb
- with_community.general.lmdb_kv:
- - be
+ - debug:
+ msg: '{{ query("community.general.lmdb_kv", "nl", "be", "lu", db="jp.mdb") }}'
+ - debug:
+ var: item.1
+ loop: '{{ query("community.general.lmdb_kv", db="jp.mdb") }}'
+ - assert:
+ that:
+ - query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') == ['Netherlands', 'Belgium', 'Luxembourg']
+ - query('community.general.lmdb_kv', db='jp.mdb')|length == 5
+ - assert:
+ that:
+ - item.0 == 'nl'
+ - item.1 == 'Netherlands'
+ vars:
+ lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - n*
+ - assert:
+ that:
+ - item == 'Belgium'
+ vars:
+ lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - be
diff --git a/tests/integration/targets/lookup_passwordstore/tasks/package.yml b/tests/integration/targets/lookup_passwordstore/tasks/package.yml
index e5ccd5677d..43f44401e8 100644
--- a/tests/integration/targets/lookup_passwordstore/tasks/package.yml
+++ b/tests/integration/targets/lookup_passwordstore/tasks/package.yml
@@ -23,17 +23,17 @@
- block:
# OpenSUSE Leap>=15.0 don't include password-store in main repo
- - name: SUSE | Add security:privacy repo
- template:
- src: security-privacy.repo.j2
- dest: /etc/zypp/repos.d/security:privacy.repo
+ - name: SUSE | Add security:privacy repo
+ template:
+ src: security-privacy.repo.j2
+ dest: /etc/zypp/repos.d/security:privacy.repo
- - name: SUSE | Install package
- package:
- name: password-store
- state: present
- update_cache: true
- disable_gpg_check: true
+ - name: SUSE | Install package
+ package:
+ name: password-store
+ state: present
+ update_cache: true
+ disable_gpg_check: true
when: ansible_facts.pkg_mgr in ['zypper', 'community.general.zypper']
# See https://github.com/gopasspw/gopass/issues/1849#issuecomment-802789285
@@ -41,20 +41,20 @@
when: ansible_facts.os_family == 'Debian'
become: true
block:
- - name: Fetch gopass repo keyring
- ansible.builtin.get_url:
- url: https://packages.gopass.pw/repos/gopass/gopass-archive-keyring.gpg
- dest: /usr/share/keyrings/gopass-archive-keyring.gpg
- - name: Add gopass repo
- ansible.builtin.apt_repository:
- repo: "deb [arch=amd64,arm64,armhf \
- signed-by=/usr/share/keyrings/gopass-archive-keyring.gpg] \
- https://packages.gopass.pw/repos/gopass stable main"
- state: present
- - name: Update apt-cache and install gopass package
- ansible.builtin.apt:
- name: gopass
- update_cache: true
+ - name: Fetch gopass repo keyring
+ ansible.builtin.get_url:
+ url: https://packages.gopass.pw/repos/gopass/gopass-archive-keyring.gpg
+ dest: /usr/share/keyrings/gopass-archive-keyring.gpg
+ - name: Add gopass repo
+ ansible.builtin.apt_repository:
+ repo: "deb [arch=amd64,arm64,armhf \
+ signed-by=/usr/share/keyrings/gopass-archive-keyring.gpg] \
+ https://packages.gopass.pw/repos/gopass stable main"
+ state: present
+ - name: Update apt-cache and install gopass package
+ ansible.builtin.apt:
+ name: gopass
+ update_cache: true
- name: Install on macOS
when: ansible_facts.distribution == 'MacOSX'
diff --git a/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml b/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml
index a94529e460..e3a8b4e4b7 100644
--- a/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml
+++ b/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml
@@ -3,128 +3,128 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Create a password ({{ backend }})
- set_fact:
- newpass: "{{ lookup('community.general.passwordstore', 'test-pass', length=8, create=true, backend=backend) }}"
+- name: Create a password ({{ backend }})
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-pass', length=8, create=true, backend=backend) }}"
- - name: Fetch password from an existing file ({{ backend }})
- set_fact:
- readpass: "{{ lookup('community.general.passwordstore', 'test-pass', backend=backend) }}"
+- name: Fetch password from an existing file ({{ backend }})
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'test-pass', backend=backend) }}"
- - name: Verify password ({{ backend }})
- assert:
- that:
- - readpass == newpass
+- name: Verify password ({{ backend }})
+ assert:
+ that:
+ - readpass == newpass
- - name: Create a password with equal sign ({{ backend }})
- set_fact:
- newpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal userpass=SimpleSample= create=true', backend=backend) }}"
+- name: Create a password with equal sign ({{ backend }})
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal userpass=SimpleSample= create=true', backend=backend) }}"
- - name: Fetch a password with equal sign ({{ backend }})
- set_fact:
- readpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal', backend=backend) }}"
+- name: Fetch a password with equal sign ({{ backend }})
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal', backend=backend) }}"
- - name: Verify password ({{ backend }})
- assert:
- that:
- - readpass == newpass
+- name: Verify password ({{ backend }})
+ assert:
+ that:
+ - readpass == newpass
- - name: Create a password using missing=create ({{ backend }})
- set_fact:
- newpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', missing='create', length=8, backend=backend) }}"
+- name: Create a password using missing=create ({{ backend }})
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', missing='create', length=8, backend=backend) }}"
- - name: Fetch password from an existing file ({{ backend }})
- set_fact:
- readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', backend=backend) }}"
+- name: Fetch password from an existing file ({{ backend }})
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', backend=backend) }}"
- - name: Verify password ({{ backend }})
- assert:
- that:
- - readpass == newpass
+- name: Verify password ({{ backend }})
+ assert:
+ that:
+ - readpass == newpass
- - name: Fetch password from existing file using missing=empty ({{ backend }})
- set_fact:
- readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', missing='empty', backend=backend) }}"
+- name: Fetch password from existing file using missing=empty ({{ backend }})
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', missing='empty', backend=backend) }}"
- - name: Verify password ({{ backend }})
- assert:
- that:
- - readpass == newpass
+- name: Verify password ({{ backend }})
+ assert:
+ that:
+ - readpass == newpass
- - name: Fetch password from non-existing file using missing=empty ({{ backend }})
- set_fact:
- readpass: "{{ query('community.general.passwordstore', 'test-missing-pass', missing='empty', backend=backend) }}"
+- name: Fetch password from non-existing file using missing=empty ({{ backend }})
+ set_fact:
+ readpass: "{{ query('community.general.passwordstore', 'test-missing-pass', missing='empty', backend=backend) }}"
- - name: Verify password ({{ backend }})
- assert:
- that:
- - readpass == [ none ]
+- name: Verify password ({{ backend }})
+ assert:
+ that:
+ - readpass == [ none ]
- - name: Create the YAML password ({{ backend }})
- command: "{{ backend }} insert -m -f test-yaml-pass"
- args:
- stdin: |
- testpassword
- key: |
- multi
- line
+- name: Create the YAML password ({{ backend }})
+ command: "{{ backend }} insert -m -f test-yaml-pass"
+ args:
+ stdin: |
+ testpassword
+ key: |
+ multi
+ line
- - name: Fetch a password with YAML subkey ({{ backend }})
- set_fact:
- readyamlpass: "{{ lookup('community.general.passwordstore', 'test-yaml-pass', subkey='key', backend=backend) }}"
+- name: Fetch a password with YAML subkey ({{ backend }})
+ set_fact:
+ readyamlpass: "{{ lookup('community.general.passwordstore', 'test-yaml-pass', subkey='key', backend=backend) }}"
- - name: Read a yaml subkey ({{ backend }})
- assert:
- that:
- - readyamlpass == 'multi\nline\n'
+- name: Read a yaml subkey ({{ backend }})
+ assert:
+ that:
+ - readyamlpass == 'multi\nline\n'
- - name: Create a non-YAML multiline file ({{ backend }})
- command: "{{ backend }} insert -m -f test-multiline-pass"
- args:
- stdin: |
- testpassword
- random additional line
+- name: Create a non-YAML multiline file ({{ backend }})
+ command: "{{ backend }} insert -m -f test-multiline-pass"
+ args:
+ stdin: |
+ testpassword
+ random additional line
- - name: Fetch password from multiline file ({{ backend }})
- set_fact:
- readyamlpass: "{{ lookup('community.general.passwordstore', 'test-multiline-pass', backend=backend) }}"
+- name: Fetch password from multiline file ({{ backend }})
+ set_fact:
+ readyamlpass: "{{ lookup('community.general.passwordstore', 'test-multiline-pass', backend=backend) }}"
- - name: Multiline pass only returns first line ({{ backend }})
- assert:
- that:
- - readyamlpass == 'testpassword'
+- name: Multiline pass only returns first line ({{ backend }})
+ assert:
+ that:
+ - readyamlpass == 'testpassword'
- - name: Fetch all from multiline file ({{ backend }})
- set_fact:
- readyamlpass: "{{ lookup('community.general.passwordstore', 'test-multiline-pass', returnall='yes', backend=backend) }}"
+- name: Fetch all from multiline file ({{ backend }})
+ set_fact:
+ readyamlpass: "{{ lookup('community.general.passwordstore', 'test-multiline-pass', returnall='yes', backend=backend) }}"
- - name: Multiline pass returnall returns everything in the file ({{ backend }})
- assert:
- that:
- - readyamlpass == 'testpassword\nrandom additional line\n'
+- name: Multiline pass returnall returns everything in the file ({{ backend }})
+ assert:
+ that:
+ - readyamlpass == 'testpassword\nrandom additional line\n'
- - name: Create a password in a folder ({{ backend }})
- set_fact:
- newpass: "{{ lookup('community.general.passwordstore', 'folder/test-pass', length=8, create=true, backend=backend) }}"
+- name: Create a password in a folder ({{ backend }})
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'folder/test-pass', length=8, create=true, backend=backend) }}"
- - name: Fetch password from folder ({{ backend }})
- set_fact:
- readpass: "{{ lookup('community.general.passwordstore', 'folder/test-pass', backend=backend) }}"
+- name: Fetch password from folder ({{ backend }})
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'folder/test-pass', backend=backend) }}"
- - name: Verify password from folder ({{ backend }})
- assert:
- that:
- - readpass == newpass
+- name: Verify password from folder ({{ backend }})
+ assert:
+ that:
+ - readpass == newpass
- - name: Try to read folder as passname ({{ backend }})
- set_fact:
- newpass: "{{ lookup('community.general.passwordstore', 'folder', backend=backend) }}"
- ignore_errors: true
- register: eval_error
+- name: Try to read folder as passname ({{ backend }})
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'folder', backend=backend) }}"
+ ignore_errors: true
+ register: eval_error
- - name: Make sure reading folder as passname failed ({{ backend }})
- assert:
- that:
- - eval_error is failed
- - '"passname folder not found" in eval_error.msg'
- when: backend != "gopass" # Remove this line once gopass backend can handle this
+- name: Make sure reading folder as passname failed ({{ backend }})
+ assert:
+ that:
+ - eval_error is failed
+ - '"passname folder not found" in eval_error.msg'
+ when: backend != "gopass" # Remove this line once gopass backend can handle this
diff --git a/tests/integration/targets/lookup_passwordstore/tasks/tests.yml b/tests/integration/targets/lookup_passwordstore/tasks/tests.yml
index 65a578c962..3928087072 100644
--- a/tests/integration/targets/lookup_passwordstore/tasks/tests.yml
+++ b/tests/integration/targets/lookup_passwordstore/tasks/tests.yml
@@ -30,7 +30,7 @@
- name: Store path of pass executable
set_fact:
- passpath: "{{ result.stdout }}"
+ passpath: "{{ result.stdout }}"
- name: Move original pass into place if there was a leftover
command:
@@ -49,7 +49,7 @@
- name: Store path of gopass executable
set_fact:
- gopasspath: "{{ result.stdout }}"
+ gopasspath: "{{ result.stdout }}"
- name: Move original gopass into place if there was a leftover
command:
@@ -64,9 +64,9 @@
command: "{{ item }} --version"
register: versions
loop:
- - "{{ gpg2_bin }}"
- - pass
- - gopass
+ - "{{ gpg2_bin }}"
+ - pass
+ - gopass
- name: Output versions of tools
debug:
@@ -131,52 +131,52 @@
- name: Test pass compatibility shim detection
block:
- - name: Move original pass out of the way
- command:
- argv:
- - mv
- - "{{ passpath }}"
- - "{{ passpath }}.testorig"
- args:
- creates: "{{ passpath }}.testorig"
+ - name: Move original pass out of the way
+ command:
+ argv:
+ - mv
+ - "{{ passpath }}"
+ - "{{ passpath }}.testorig"
+ args:
+ creates: "{{ passpath }}.testorig"
- - name: Create dummy pass script
- ansible.builtin.copy:
- content: |
- #!/bin/sh
- echo "shim_ok"
- dest: "{{ passpath }}"
- mode: '0755'
+ - name: Create dummy pass script
+ ansible.builtin.copy:
+ content: |
+ #!/bin/sh
+ echo "shim_ok"
+ dest: "{{ passpath }}"
+ mode: '0755'
- - name: Try reading from non-existent passwordstore location with different pass utility
- set_fact:
- newpass: "{{ lookup('community.general.passwordstore', 'test-pass') }}"
- environment:
- PATH: "/tmp"
+ - name: Try reading from non-existent passwordstore location with different pass utility
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-pass') }}"
+ environment:
+ PATH: "/tmp"
- - name: Verify password received from shim
- assert:
- that:
- - newpass == "shim_ok"
+ - name: Verify password received from shim
+ assert:
+ that:
+ - newpass == "shim_ok"
- - name: Try to read folder as passname with a different pass utility
- set_fact:
- newpass: "{{ lookup('community.general.passwordstore', 'folder') }}"
+ - name: Try to read folder as passname with a different pass utility
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'folder') }}"
- - name: Verify password received from shim
- assert:
- that:
- - newpass == "shim_ok"
+ - name: Verify password received from shim
+ assert:
+ that:
+ - newpass == "shim_ok"
always:
- - name: Move original pass back into place
- command:
- argv:
- - mv
- - "{{ passpath }}.testorig"
- - "{{ passpath }}"
- args:
- removes: "{{ passpath }}.testorig"
+ - name: Move original pass back into place
+ command:
+ argv:
+ - mv
+ - "{{ passpath }}.testorig"
+ - "{{ passpath }}"
+ args:
+ removes: "{{ passpath }}.testorig"
# This are in addition to the real gopass tests above
# and verify plugin logic
@@ -184,56 +184,56 @@
vars:
passwordstore_backend: "gopass"
block:
- - name: Check if gopass executable exists
- stat:
- path: "{{ gopasspath }}"
- register: gopass_check
+ - name: Check if gopass executable exists
+ stat:
+ path: "{{ gopasspath }}"
+ register: gopass_check
- - name: Move original gopass out of the way
- command:
- argv:
- - mv
- - "{{ gopasspath }}"
- - "{{ gopasspath }}.testorig"
- args:
- creates: "{{ gopasspath }}.testorig"
- when: gopass_check.stat.exists == true
+ - name: Move original gopass out of the way
+ command:
+ argv:
+ - mv
+ - "{{ gopasspath }}"
+ - "{{ gopasspath }}.testorig"
+ args:
+ creates: "{{ gopasspath }}.testorig"
+ when: gopass_check.stat.exists == true
- - name: Create mocked gopass script
- ansible.builtin.copy:
- content: |
- #!/bin/sh
- if [ "$GOPASS_NO_REMINDER" != "YES" ]; then
- exit 1
- fi
- if [ "$1" = "--version" ]; then
- exit 2
- fi
- echo "gopass_ok"
- dest: "{{ gopasspath }}"
- mode: '0755'
+ - name: Create mocked gopass script
+ ansible.builtin.copy:
+ content: |
+ #!/bin/sh
+ if [ "$GOPASS_NO_REMINDER" != "YES" ]; then
+ exit 1
+ fi
+ if [ "$1" = "--version" ]; then
+ exit 2
+ fi
+ echo "gopass_ok"
+ dest: "{{ gopasspath }}"
+ mode: '0755'
- - name: Try to read folder as passname using gopass mock
- set_fact:
- newpass: "{{ lookup('community.general.passwordstore', 'folder') }}"
+ - name: Try to read folder as passname using gopass mock
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'folder') }}"
- - name: Verify password received from gopass mock
- assert:
- that:
- - newpass == "gopass_ok"
+ - name: Verify password received from gopass mock
+ assert:
+ that:
+ - newpass == "gopass_ok"
always:
- - name: Remove mocked gopass
- ansible.builtin.file:
- path: "{{ gopasspath }}"
- state: absent
+ - name: Remove mocked gopass
+ ansible.builtin.file:
+ path: "{{ gopasspath }}"
+ state: absent
- - name: Move original gopass back into place
- command:
- argv:
- - mv
- - "{{ gopasspath }}.testorig"
- - "{{ gopasspath }}"
- args:
- removes: "{{ gopasspath }}.testorig"
- when: gopass_check.stat.exists == true
+ - name: Move original gopass back into place
+ command:
+ argv:
+ - mv
+ - "{{ gopasspath }}.testorig"
+ - "{{ gopasspath }}"
+ args:
+ removes: "{{ gopasspath }}.testorig"
+ when: gopass_check.stat.exists == true
diff --git a/tests/integration/targets/lookup_random_pet/test.yml b/tests/integration/targets/lookup_random_pet/test.yml
index c61461867a..a40ab2262c 100644
--- a/tests/integration/targets/lookup_random_pet/test.yml
+++ b/tests/integration/targets/lookup_random_pet/test.yml
@@ -6,25 +6,25 @@
- hosts: localhost
gather_facts: false
tasks:
- - name: Call plugin
- set_fact:
- result1: "{{ query('community.general.random_pet', words=3) }}"
- result2: "{{ query('community.general.random_pet', length=3) }}"
- result3: "{{ query('community.general.random_pet', prefix='kubernetes') }}"
- result4: "{{ query('community.general.random_pet', separator='_') }}"
- result5: "{{ query('community.general.random_pet', words=2, length=6, prefix='kubernetes', separator='_') }}"
+ - name: Call plugin
+ set_fact:
+ result1: "{{ query('community.general.random_pet', words=3) }}"
+ result2: "{{ query('community.general.random_pet', length=3) }}"
+ result3: "{{ query('community.general.random_pet', prefix='kubernetes') }}"
+ result4: "{{ query('community.general.random_pet', separator='_') }}"
+ result5: "{{ query('community.general.random_pet', words=2, length=6, prefix='kubernetes', separator='_') }}"
- - name: Check results
- assert:
- that:
- - result1 | length == 1
- - result1[0].split('-') | length == 3
- - result2 | length == 1
- - result2[0].split('-')[0] | length <= 3
- - result3 | length == 1
- - result3[0].split('-')[0] == 'kubernetes'
- - result4 | length == 1
- - result4[0].split('_') | length == 2
- - result5 | length == 1
- - result5[0].split('_') | length == 3
- - result5[0].split('_')[0] == 'kubernetes'
+ - name: Check results
+ assert:
+ that:
+ - result1 | length == 1
+ - result1[0].split('-') | length == 3
+ - result2 | length == 1
+ - result2[0].split('-')[0] | length <= 3
+ - result3 | length == 1
+ - result3[0].split('-')[0] == 'kubernetes'
+ - result4 | length == 1
+ - result4[0].split('_') | length == 2
+ - result5 | length == 1
+ - result5[0].split('_') | length == 3
+ - result5[0].split('_')[0] == 'kubernetes'
diff --git a/tests/integration/targets/lookup_random_string/test.yml b/tests/integration/targets/lookup_random_string/test.yml
index b1f6234102..b74116d04a 100644
--- a/tests/integration/targets/lookup_random_string/test.yml
+++ b/tests/integration/targets/lookup_random_string/test.yml
@@ -6,48 +6,48 @@
- hosts: localhost
gather_facts: false
tasks:
- - name: Call plugin
- set_fact:
- result1: "{{ query('community.general.random_string') }}"
- result2: "{{ query('community.general.random_string', length=0) }}"
- result3: "{{ query('community.general.random_string', length=10) }}"
- result4: "{{ query('community.general.random_string', length=-1) }}"
- result5: "{{ query('community.general.random_string', override_special='_', min_special=1) }}"
- result6: "{{ query('community.general.random_string', upper=false, special=false) }}" # lower case only
- result7: "{{ query('community.general.random_string', lower=false, special=false) }}" # upper case only
- result8: "{{ query('community.general.random_string', lower=false, upper=false, special=false) }}" # number only
- result9: "{{ query('community.general.random_string', lower=false, upper=false, special=false, min_numeric=1, length=1) }}" # single digit only
- result10: "{{ query('community.general.random_string', numbers=false, upper=false, special=false, min_lower=1, length=1) }}" # single lowercase character only
- result11: "{{ query('community.general.random_string', base64=true, length=8) }}"
- result12: "{{ query('community.general.random_string', upper=false, numbers=false, special=false) }}" # all lower case
- result13: "{{ query('community.general.random_string', override_all='0', length=2) }}"
+ - name: Call plugin
+ set_fact:
+ result1: "{{ query('community.general.random_string') }}"
+ result2: "{{ query('community.general.random_string', length=0) }}"
+ result3: "{{ query('community.general.random_string', length=10) }}"
+ result4: "{{ query('community.general.random_string', length=-1) }}"
+ result5: "{{ query('community.general.random_string', override_special='_', min_special=1) }}"
+ result6: "{{ query('community.general.random_string', upper=false, special=false) }}" # lower case only
+ result7: "{{ query('community.general.random_string', lower=false, special=false) }}" # upper case only
+ result8: "{{ query('community.general.random_string', lower=false, upper=false, special=false) }}" # number only
+ result9: "{{ query('community.general.random_string', lower=false, upper=false, special=false, min_numeric=1, length=1) }}" # single digit only
+ result10: "{{ query('community.general.random_string', numbers=false, upper=false, special=false, min_lower=1, length=1) }}" # single lowercase character only
+ result11: "{{ query('community.general.random_string', base64=true, length=8) }}"
+ result12: "{{ query('community.general.random_string', upper=false, numbers=false, special=false) }}" # all lower case
+ result13: "{{ query('community.general.random_string', override_all='0', length=2) }}"
- - name: Raise error when impossible constraints are provided
- set_fact:
- impossible: "{{ query('community.general.random_string', upper=false, lower=false, special=false, numbers=false) }}"
- ignore_errors: true
- register: impossible_result
+ - name: Raise error when impossible constraints are provided
+ set_fact:
+ impossible: "{{ query('community.general.random_string', upper=false, lower=false, special=false, numbers=false) }}"
+ ignore_errors: true
+ register: impossible_result
- - name: Check results
- assert:
- that:
- - result1[0] | length == 8
- - result2[0] | length == 0
- - result3[0] | length == 10
- - result4[0] | length == 0
- - result5[0] | length == 8
- - "'_' in result5[0]"
- - result6[0] is lower
- - result7[0] is upper
- - result8[0] | regex_replace('^(\d+)$', '') == ''
- - result9[0] | regex_replace('^(\d+)$', '') == ''
- - result9[0] | length == 1
- - result10[0] | length == 1
- - result10[0] is lower
- # if input string is not multiple of 3, base64 encoded string will be padded with =
- - result11[0].endswith('=')
- - result12[0] is lower
- - result13[0] | length == 2
- - result13[0] == '00'
- - impossible_result is failed
- - "'Available characters cannot' in impossible_result.msg"
+ - name: Check results
+ assert:
+ that:
+ - result1[0] | length == 8
+ - result2[0] | length == 0
+ - result3[0] | length == 10
+ - result4[0] | length == 0
+ - result5[0] | length == 8
+ - "'_' in result5[0]"
+ - result6[0] is lower
+ - result7[0] is upper
+ - result8[0] | regex_replace('^(\d+)$', '') == ''
+ - result9[0] | regex_replace('^(\d+)$', '') == ''
+ - result9[0] | length == 1
+ - result10[0] | length == 1
+ - result10[0] is lower
+ # if input string is not multiple of 3, base64 encoded string will be padded with =
+ - result11[0].endswith('=')
+ - result12[0] is lower
+ - result13[0] | length == 2
+ - result13[0] == '00'
+ - impossible_result is failed
+ - "'Available characters cannot' in impossible_result.msg"
diff --git a/tests/integration/targets/lookup_random_words/test.yml b/tests/integration/targets/lookup_random_words/test.yml
index e1b6fde13b..0c8fd8e110 100644
--- a/tests/integration/targets/lookup_random_words/test.yml
+++ b/tests/integration/targets/lookup_random_words/test.yml
@@ -6,28 +6,28 @@
- hosts: localhost
gather_facts: false
tasks:
- - name: Call random_words plugin
- set_fact:
- result1: "{{ query('community.general.random_words') }}"
- result2: "{{ query('community.general.random_words', min_length=5, max_length=5) }}"
- result3: "{{ query('community.general.random_words', delimiter='!') }}"
- result4: "{{ query('community.general.random_words', numwords=3, delimiter='-', case='capitalize') }}"
- result5: "{{ query('community.general.random_words', min_length=5, max_length=5, numwords=3, delimiter='') }}"
+ - name: Call random_words plugin
+ set_fact:
+ result1: "{{ query('community.general.random_words') }}"
+ result2: "{{ query('community.general.random_words', min_length=5, max_length=5) }}"
+ result3: "{{ query('community.general.random_words', delimiter='!') }}"
+ result4: "{{ query('community.general.random_words', numwords=3, delimiter='-', case='capitalize') }}"
+ result5: "{{ query('community.general.random_words', min_length=5, max_length=5, numwords=3, delimiter='') }}"
- - name: Check results
- assert:
- that:
- - result1 | length == 1
- - result1[0] | length >= 35
- - result2 | length == 1
- - result2[0] | length == 35
- - result3 | length == 1
- - result3[0].count("!") == 5
- - result4 | length == 1
- - result4[0] | length >= 17
- - result4[0] | length <= 29
- - result4[0] | regex_findall("[A-Z]") | length == 3
- # If one of the random words is 't-shirt', there are more than 2 dashes...
- - result4[0].count("-") == 2 or "t-shirt" in result4[0].lower()
- - result5 | length == 1
- - result5[0] | length == 15
+ - name: Check results
+ assert:
+ that:
+ - result1 | length == 1
+ - result1[0] | length >= 35
+ - result2 | length == 1
+ - result2[0] | length == 35
+ - result3 | length == 1
+ - result3[0].count("!") == 5
+ - result4 | length == 1
+ - result4[0] | length >= 17
+ - result4[0] | length <= 29
+ - result4[0] | regex_findall("[A-Z]") | length == 3
+ # If one of the random words is 't-shirt', there are more than 2 dashes...
+ - result4[0].count("-") == 2 or "t-shirt" in result4[0].lower()
+ - result5 | length == 1
+ - result5[0] | length == 15
diff --git a/tests/integration/targets/lvg/aliases b/tests/integration/targets/lvg/aliases
index cbe489d88b..eb76529397 100644
--- a/tests/integration/targets/lvg/aliases
+++ b/tests/integration/targets/lvg/aliases
@@ -11,3 +11,4 @@ skip/freebsd
skip/osx
skip/macos
skip/alpine3.21 # TODO try to fix
+skip/alpine3.22 # TODO try to fix
diff --git a/tests/integration/targets/lvg/tasks/test_active_create.yml b/tests/integration/targets/lvg/tasks/test_active_create.yml
index 7ac1ffedd7..2ad530bc53 100644
--- a/tests/integration/targets/lvg/tasks/test_active_create.yml
+++ b/tests/integration/targets/lvg/tasks/test_active_create.yml
@@ -64,8 +64,8 @@
assert:
that: "inactive_by_option_vg_autoact_status_result.stdout | length == 0"
always:
- - name: Cleanup vg_autoact_test
- lvg:
- state: absent
- vg: vg_autoact_test
- force: true
+ - name: Cleanup vg_autoact_test
+ lvg:
+ state: absent
+ vg: vg_autoact_test
+ force: true
diff --git a/tests/integration/targets/lvg/tasks/test_pvresize.yml b/tests/integration/targets/lvg/tasks/test_pvresize.yml
index 3f3b9dbddb..90bea4e5d7 100644
--- a/tests/integration/targets/lvg/tasks/test_pvresize.yml
+++ b/tests/integration/targets/lvg/tasks/test_pvresize.yml
@@ -14,8 +14,8 @@
- name: Assert the testvg size is 33554432B
assert:
- that:
- - "'33554432B' == cmd_result.stdout"
+ that:
+ - "'33554432B' == cmd_result.stdout"
- name: Increases size in file
command: "dd if=/dev/zero bs=8MiB count=1 of={{ remote_tmp_dir }}/img1 conv=notrunc oflag=append"
@@ -25,14 +25,14 @@
- name: "Reruns lvg with pvresize:no"
lvg:
- vg: testvg
- pvs: "{{ loop_device1 }}"
- pvresize: false
+ vg: testvg
+ pvs: "{{ loop_device1 }}"
+ pvresize: false
register: cmd_result
-
+
- assert:
that:
- - cmd_result is not changed
+ - cmd_result is not changed
- name: Gets current vg size
shell: vgs -v testvg -o pv_size --noheading --units b | xargs
@@ -40,36 +40,36 @@
- name: Assert the testvg size is still 33554432B
assert:
- that:
- - "'33554432B' == cmd_result.stdout"
+ that:
+ - "'33554432B' == cmd_result.stdout"
- name: "Reruns lvg with pvresize:yes and check_mode:yes"
lvg:
- vg: testvg
- pvs: "{{ loop_device1 }}"
- pvresize: true
+ vg: testvg
+ pvs: "{{ loop_device1 }}"
+ pvresize: true
check_mode: true
register: cmd_result
-
+
- name: Assert that the module returned the state was changed
assert:
that:
- - cmd_result is changed
+ - cmd_result is changed
- name: Gets current vg size
shell: vgs -v testvg -o pv_size --noheading --units b | xargs
register: cmd_result
-
+
- name: Assert the testvg size is still 33554432B
assert:
- that:
- - "'33554432B' == cmd_result.stdout"
+ that:
+ - "'33554432B' == cmd_result.stdout"
- name: "Reruns lvg with pvresize:yes"
lvg:
- vg: testvg
- pvs: "{{ loop_device1 }}"
- pvresize: true
+ vg: testvg
+ pvs: "{{ loop_device1 }}"
+ pvresize: true
- name: Gets current vg size
shell: vgs -v testvg -o pv_size --noheading --units b | xargs
@@ -77,5 +77,5 @@
- name: Assert the testvg size is now 41943040B
assert:
- that:
- - "'41943040B' == cmd_result.stdout"
+ that:
+ - "'41943040B' == cmd_result.stdout"
diff --git a/tests/integration/targets/lvm_pv/aliases b/tests/integration/targets/lvm_pv/aliases
new file mode 100644
index 0000000000..64d439099c
--- /dev/null
+++ b/tests/integration/targets/lvm_pv/aliases
@@ -0,0 +1,13 @@
+# Copyright (c) Contributors to the Ansible project
+# Based on the integraton test for the lvg module
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+azp/posix/vm
+destructive
+needs/privileged
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/tests/integration/targets/lvm_pv/meta/main.yml b/tests/integration/targets/lvm_pv/meta/main.yml
new file mode 100644
index 0000000000..90c5d5cb8d
--- /dev/null
+++ b/tests/integration/targets/lvm_pv/meta/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# Based on the integration test for the lvg module
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/tests/integration/targets/lvm_pv/tasks/cleanup.yml b/tests/integration/targets/lvm_pv/tasks/cleanup.yml
new file mode 100644
index 0000000000..a9c0bb095d
--- /dev/null
+++ b/tests/integration/targets/lvm_pv/tasks/cleanup.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Detaching loop device
+ ansible.builtin.command: losetup -d {{ loop_device.stdout }}
+
+- name: Removing loop device file
+ ansible.builtin.file:
+ path: "{{ remote_tmp_dir }}/test_lvm_pv.img"
+ state: absent
diff --git a/tests/integration/targets/lvm_pv/tasks/creation.yml b/tests/integration/targets/lvm_pv/tasks/creation.yml
new file mode 100644
index 0000000000..a26a39c524
--- /dev/null
+++ b/tests/integration/targets/lvm_pv/tasks/creation.yml
@@ -0,0 +1,33 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Creating a 50MB file for loop device
+ ansible.builtin.command: dd if=/dev/zero of={{ remote_tmp_dir }}/test_lvm_pv.img bs=1M count=50
+ args:
+ creates: "{{ remote_tmp_dir }}/test_lvm_pv.img"
+
+- name: Creating loop device
+ ansible.builtin.command: losetup -f
+ register: loop_device
+
+- name: Associating loop device with file
+ ansible.builtin.command: 'losetup {{ loop_device.stdout }} {{ remote_tmp_dir }}/test_lvm_pv.img'
+
+- name: Creating physical volume
+ community.general.lvm_pv:
+ device: "{{ loop_device.stdout }}"
+ register: result
+
+- name: Checking physical volume size
+ ansible.builtin.command: pvs --noheadings -o pv_size --units M {{ loop_device.stdout }}
+ register: pv_size_output
+
+- name: Asserting physical volume was created
+ ansible.builtin.assert:
+ that:
+ - result.changed == true
+ - (pv_size_output.stdout | trim | regex_replace('M', '') | float) > 45
+ - (pv_size_output.stdout | trim | regex_replace('M', '') | float) < 55
+ - "'created' in result.msg"
diff --git a/tests/integration/targets/lvm_pv/tasks/main.yml b/tests/integration/targets/lvm_pv/tasks/main.yml
new file mode 100644
index 0000000000..16c966d274
--- /dev/null
+++ b/tests/integration/targets/lvm_pv/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Contributors to the Ansible project
+# Based on the integration test for the lvg module
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required packages (Linux)
+ when: ansible_system == 'Linux'
+ ansible.builtin.package:
+ name: lvm2
+ state: present
+
+- name: Testing lvm_pv module
+ block:
+ - import_tasks: creation.yml
+
+ - import_tasks: resizing.yml
+
+ - import_tasks: removal.yml
+
+ always:
+ - import_tasks: cleanup.yml
diff --git a/tests/integration/targets/lvm_pv/tasks/removal.yml b/tests/integration/targets/lvm_pv/tasks/removal.yml
new file mode 100644
index 0000000000..d59a890a55
--- /dev/null
+++ b/tests/integration/targets/lvm_pv/tasks/removal.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Removing physical volume
+ community.general.lvm_pv:
+ device: "{{ loop_device.stdout }}"
+ state: absent
+ register: remove_result
+
+- name: Asserting physical volume was removed
+ ansible.builtin.assert:
+ that:
+ - remove_result.changed == true
+ - "'removed' in remove_result.msg"
diff --git a/tests/integration/targets/lvm_pv/tasks/resizing.yml b/tests/integration/targets/lvm_pv/tasks/resizing.yml
new file mode 100644
index 0000000000..184fe7498c
--- /dev/null
+++ b/tests/integration/targets/lvm_pv/tasks/resizing.yml
@@ -0,0 +1,27 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Growing the loop device file to 100MB
+ ansible.builtin.shell: truncate -s 100M {{ remote_tmp_dir }}/test_lvm_pv.img
+
+- name: Refreshing the loop device
+ ansible.builtin.shell: losetup -c {{ loop_device.stdout }}
+
+- name: Resizing the physical volume
+ community.general.lvm_pv:
+ device: "{{ loop_device.stdout }}"
+ resize: true
+ register: resize_result
+
+- name: Checking physical volume size
+ ansible.builtin.command: pvs --noheadings -o pv_size --units M {{ loop_device.stdout }}
+ register: pv_size_output
+
+- name: Asserting physical volume was resized
+ ansible.builtin.assert:
+ that:
+ - resize_result.changed == true
+ - (pv_size_output.stdout | trim | regex_replace('M', '') | float) > 95
+ - "'resized' in resize_result.msg"
diff --git a/tests/integration/targets/mail/tasks/main.yml b/tests/integration/targets/mail/tasks/main.yml
index 83c242ad23..3831a43643 100644
--- a/tests/integration/targets/mail/tasks/main.yml
+++ b/tests/integration/targets/mail/tasks/main.yml
@@ -10,101 +10,101 @@
# TODO: Our current implementation does not handle SMTP authentication
-- when:
+- when:
# TODO: https://github.com/ansible-collections/community.general/issues/4656
- ansible_python.version.major != 3 or ansible_python.version.minor < 12
block:
- # NOTE: If the system does not support smtpd-tls (python 2.6 and older) we do basic tests
- - name: Attempt to install smtpd-tls
- pip:
- name: smtpd-tls
- state: present
- ignore_errors: true
- register: smtpd_tls
+ # NOTE: If the system does not support smtpd-tls (python 2.6 and older) we do basic tests
+ - name: Attempt to install smtpd-tls
+ pip:
+ name: smtpd-tls
+ state: present
+ ignore_errors: true
+ register: smtpd_tls
- - name: Install test smtpserver
- copy:
- src: '{{ item }}'
- dest: '{{ remote_tmp_dir }}/{{ item }}'
- loop:
- - smtpserver.py
- - smtpserver.crt
- - smtpserver.key
+ - name: Install test smtpserver
+ copy:
+ src: '{{ item }}'
+ dest: '{{ remote_tmp_dir }}/{{ item }}'
+ loop:
+ - smtpserver.py
+ - smtpserver.crt
+ - smtpserver.key
- # FIXME: Verify the mail after it was send would be nice
- # This would require either dumping the content, or registering async task output
- - name: Start test smtpserver
- shell: '{{ ansible_python.executable }} {{ remote_tmp_dir }}/smtpserver.py 10025:10465'
- async: 45
- poll: 0
- register: smtpserver
+ # FIXME: Verifying the mail after it was sent would be nice
+ # This would require either dumping the content, or registering async task output
+ - name: Start test smtpserver
+ shell: '{{ ansible_python.executable }} {{ remote_tmp_dir }}/smtpserver.py 10025:10465'
+ async: 45
+ poll: 0
+ register: smtpserver
- - name: Send a basic test-mail
- mail:
- port: 10025
- subject: Test mail 1 (smtp)
- secure: never
+ - name: Send a basic test-mail
+ mail:
+ port: 10025
+ subject: Test mail 1 (smtp)
+ secure: never
- - name: Send a test-mail with body and specific recipient
- mail:
- port: 10025
- from: ansible@localhost
- to: root@localhost
- subject: Test mail 2 (smtp + body)
- body: Test body 2
- secure: never
+ - name: Send a test-mail with body and specific recipient
+ mail:
+ port: 10025
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 2 (smtp + body)
+ body: Test body 2
+ secure: never
- - name: Send a test-mail with attachment
- mail:
- port: 10025
- from: ansible@localhost
- to: root@localhost
- subject: Test mail 3 (smtp + body + attachment)
- body: Test body 3
- attach: /etc/group
- secure: never
+ - name: Send a test-mail with attachment
+ mail:
+ port: 10025
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 3 (smtp + body + attachment)
+ body: Test body 3
+ attach: /etc/group
+ secure: never
- # NOTE: This might fail if smtpd-tls is missing or python 2.7.8 or older is used
- - name: Send a test-mail using starttls
- mail:
- port: 10025
- from: ansible@localhost
- to: root@localhost
- subject: Test mail 4 (smtp + starttls + body + attachment)
- body: Test body 4
- attach: /etc/group
- secure: starttls
- ignore_errors: true
- register: starttls_support
+ # NOTE: This might fail if smtpd-tls is missing or python 2.7.8 or older is used
+ - name: Send a test-mail using starttls
+ mail:
+ port: 10025
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 4 (smtp + starttls + body + attachment)
+ body: Test body 4
+ attach: /etc/group
+ secure: starttls
+ ignore_errors: true
+ register: starttls_support
- # NOTE: This might fail if smtpd-tls is missing or python 2.7.8 or older is used
- - name: Send a test-mail using TLS
- mail:
- port: 10465
- from: ansible@localhost
- to: root@localhost
- subject: Test mail 5 (smtp + tls + body + attachment)
- body: Test body 5
- attach: /etc/group
- secure: always
- ignore_errors: true
- register: tls_support
+ # NOTE: This might fail if smtpd-tls is missing or python 2.7.8 or older is used
+ - name: Send a test-mail using TLS
+ mail:
+ port: 10465
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 5 (smtp + tls + body + attachment)
+ body: Test body 5
+ attach: /etc/group
+ secure: always
+ ignore_errors: true
+ register: tls_support
- - fail:
- msg: Sending mail using starttls failed.
- when: smtpd_tls is succeeded and starttls_support is failed and tls_support is succeeded
+ - fail:
+ msg: Sending mail using starttls failed.
+ when: smtpd_tls is succeeded and starttls_support is failed and tls_support is succeeded
- - fail:
- msg: Send mail using TLS failed.
- when: smtpd_tls is succeeded and tls_support is failed and starttls_support is succeeded
+ - fail:
+ msg: Send mail using TLS failed.
+ when: smtpd_tls is succeeded and tls_support is failed and starttls_support is succeeded
- - name: Send a test-mail with body, specific recipient and specific ehlohost
- mail:
- port: 10025
- ehlohost: some.domain.tld
- from: ansible@localhost
- to: root@localhost
- subject: Test mail 6 (smtp + body + ehlohost)
- body: Test body 6
- secure: never
+ - name: Send a test-mail with body, specific recipient and specific ehlohost
+ mail:
+ port: 10025
+ ehlohost: some.domain.tld
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 6 (smtp + body + ehlohost)
+ body: Test body 6
+ secure: never
diff --git a/tests/integration/targets/mas/tasks/main.yml b/tests/integration/targets/mas/tasks/main.yml
index 839620779e..d4b51d3842 100644
--- a/tests/integration/targets/mas/tasks/main.yml
+++ b/tests/integration/targets/mas/tasks/main.yml
@@ -24,7 +24,7 @@
- name: Ensure the app is uninstalled
assert:
that:
- - install_status.stat.exists == false
+ - install_status.stat.exists == false
- name: Wait until the OS-internal cache was updated
pause:
@@ -41,8 +41,8 @@
- name: Ensure that the status would have changed
assert:
that:
- - install_check is changed
- - install_check.msg == "Installed 1 app(s)"
+ - install_check is changed
+ - install_check.msg == "Installed 1 app(s)"
- name: Determine whether the app is installed
stat:
@@ -52,7 +52,7 @@
- name: Ensure the app is not yet installed
assert:
that:
- - install_status.stat.exists == false
+ - install_status.stat.exists == false
- name: Install Rested
mas:
@@ -63,8 +63,8 @@
- name: Ensure that the status changed
assert:
that:
- - install is changed
- - install.msg == "Installed 1 app(s)"
+ - install is changed
+ - install.msg == "Installed 1 app(s)"
- name: Determine whether the app is installed
stat:
@@ -74,7 +74,7 @@
- name: Ensure the app is installed
assert:
that:
- - install_status.stat.exists == true
+ - install_status.stat.exists == true
- name: Wait until the OS-internal cache was updated
pause:
@@ -89,8 +89,8 @@
- name: Ensure that the status is unchanged (already installed)
assert:
that:
- - install_again is not changed
- - "'msg' not in install_again"
+ - install_again is not changed
+ - "'msg' not in install_again"
# Uninstallation
- name: Check if Rested needs to be uninstalled
@@ -104,8 +104,8 @@
- name: Ensure that the status would have changed
assert:
that:
- - uninstall_check is changed
- - uninstall_check.msg == "Uninstalled 1 app(s)"
+ - uninstall_check is changed
+ - uninstall_check.msg == "Uninstalled 1 app(s)"
- name: Determine whether the app is installed
stat:
@@ -115,7 +115,7 @@
- name: Ensure the app is not yet uninstalled
assert:
that:
- - install_status.stat.exists == true
+ - install_status.stat.exists == true
- name: Uninstall Rested
mas:
@@ -127,8 +127,8 @@
- name: Ensure that the status changed
assert:
that:
- - uninstall is changed
- - uninstall.msg == "Uninstalled 1 app(s)"
+ - uninstall is changed
+ - uninstall.msg == "Uninstalled 1 app(s)"
- name: Determine whether the app is installed
stat:
@@ -138,7 +138,7 @@
- name: Ensure the app is uninstalled
assert:
that:
- - uninstall_status.stat.exists == false
+ - uninstall_status.stat.exists == false
- name: Wait until the OS-internal cache was updated
pause:
@@ -154,5 +154,5 @@
- name: Ensure that the status is unchanged (already uninstalled)
assert:
that:
- - uninstall_again is not changed
- - "'msg' not in uninstall_again"
+ - uninstall_again is not changed
+ - "'msg' not in uninstall_again"
diff --git a/tests/integration/targets/module_helper/library/mdepfail.py b/tests/integration/targets/module_helper/library/mdepfail.py
index b61c32a4da..ba315d0111 100644
--- a/tests/integration/targets/module_helper/library/mdepfail.py
+++ b/tests/integration/targets/module_helper/library/mdepfail.py
@@ -57,7 +57,7 @@ class MSimple(ModuleHelper):
raise Exception("a >= 100")
if self.vars.c == "abc change":
self.vars['abc'] = "changed abc"
- if self.vars.get('a', 0) == 2:
+ if self.vars.a == 2:
self.vars['b'] = str(self.vars.b) * 2
self.vars['c'] = str(self.vars.c) * 2
diff --git a/tests/integration/targets/module_helper/library/msimple.py b/tests/integration/targets/module_helper/library/msimple.py
index 096e515247..41407ec50e 100644
--- a/tests/integration/targets/module_helper/library/msimple.py
+++ b/tests/integration/targets/module_helper/library/msimple.py
@@ -63,7 +63,7 @@ class MSimple(ModuleHelper):
raise Exception("a >= 100")
if self.vars.c == "abc change":
self.vars['abc'] = "changed abc"
- if self.vars.get('a', 0) == 2:
+ if self.vars.a == 2:
self.vars['b'] = str(self.vars.b) * 2
self.vars['c'] = str(self.vars.c) * 2
self.process_a3_bc()
diff --git a/tests/integration/targets/module_helper/library/mstate.py b/tests/integration/targets/module_helper/library/mstate.py
index b3b4ed5e69..bfaab03755 100644
--- a/tests/integration/targets/module_helper/library/mstate.py
+++ b/tests/integration/targets/module_helper/library/mstate.py
@@ -49,7 +49,6 @@ class MState(StateModuleHelper):
state=dict(type='str', choices=['join', 'b_x_a', 'c_x_a', 'both_x_a', 'nop'], default='join'),
),
)
- use_old_vardict = False
def __init_module__(self):
self.vars.set('result', "abc", diff=True)
diff --git a/tests/integration/targets/module_helper/tasks/msimpleda.yml b/tests/integration/targets/module_helper/tasks/msimpleda.yml
index 5fe727ca5e..2d89cbaa39 100644
--- a/tests/integration/targets/module_helper/tasks/msimpleda.yml
+++ b/tests/integration/targets/module_helper/tasks/msimpleda.yml
@@ -11,11 +11,18 @@
attr2_depr_dict_dt:
msg: Attribute attr2 is deprecated
version: 9.9.9
- plugin:
- requested_name: msimpleda
- resolved_name: msimpleda
- type: module
- collection_name: null # should be "community.general"; this will hopefully change back because this seriously sucks
+ collection_name: community.general
+ deprecator:
+ resolved_name: community.general
+ type: collection
+ # Latest version:
+ attr2_depr_dict_dt2:
+ msg: Attribute attr2 is deprecated
+ version: 9.9.9
+ collection_name: community.general
+ deprecator:
+ resolved_name: community.general
+ type: ~
- name: test msimpleda 1
msimpleda:
@@ -27,7 +34,12 @@
that:
- simple1.a == 1
- simple1.attr1 == "abc"
- - ("deprecations" not in simple1) or (attr2_depr_dict not in simple1.deprecations and attr2_depr_dict_dt not in simple1.deprecations)
+ - >-
+ ("deprecations" not in simple1) or (
+ attr2_depr_dict not in simple1.deprecations and
+ attr2_depr_dict_dt not in simple1.deprecations and
+ attr2_depr_dict_dt2 not in simple1.deprecations
+ )
- name: test msimpleda 2
msimpleda:
@@ -44,4 +56,7 @@
- simple2.a == 2
- simple2.attr2 == "def"
- '"deprecations" in simple2'
- - attr2_depr_dict in simple2.deprecations or attr2_depr_dict_dt in simple2.deprecations
+ - >-
+ attr2_depr_dict in simple2.deprecations or
+ attr2_depr_dict_dt in simple2.deprecations or
+ attr2_depr_dict_dt2 in simple2.deprecations
diff --git a/tests/integration/targets/monit/tasks/main.yml b/tests/integration/targets/monit/tasks/main.yml
index ea85954125..518e997c32 100644
--- a/tests/integration/targets/monit/tasks/main.yml
+++ b/tests/integration/targets/monit/tasks/main.yml
@@ -9,91 +9,91 @@
# SPDX-License-Identifier: GPL-3.0-or-later
- block:
- - name: Install EPEL repository (RHEL only)
- include_role:
- name: setup_epel
- when:
- - ansible_distribution in ['RedHat', 'CentOS']
- - ansible_distribution_major_version is version('9', '<')
+ - name: Install EPEL repository (RHEL only)
+ include_role:
+ name: setup_epel
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS']
+ - ansible_distribution_major_version is version('9', '<')
- - name: create required directories
- become: true
- file:
- path: "{{ item }}"
- state: directory
- loop:
- - /var/lib/monit
- - /var/run/monit
- - "{{ process_root }}"
+ - name: create required directories
+ become: true
+ file:
+ path: "{{ item }}"
+ state: directory
+ loop:
+ - /var/lib/monit
+ - /var/run/monit
+ - "{{ process_root }}"
- - name: install monit
- become: true
- package:
- name: monit
- state: present
+ - name: install monit
+ become: true
+ package:
+ name: monit
+ state: present
- - include_vars: '{{ item }}'
- with_first_found:
- - files:
- - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
- - '{{ ansible_os_family }}.yml'
- - 'defaults.yml'
+ - include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - '{{ ansible_os_family }}.yml'
+ - 'defaults.yml'
- - name: monit config
- become: true
- template:
- src: "monitrc.j2"
- dest: "{{ monitrc }}"
+ - name: monit config
+ become: true
+ template:
+ src: "monitrc.j2"
+ dest: "{{ monitrc }}"
- - name: copy process file
- become: true
- copy:
- src: httpd_echo.py
- dest: "{{ process_file }}"
+ - name: copy process file
+ become: true
+ copy:
+ src: httpd_echo.py
+ dest: "{{ process_file }}"
- - name: Install virtualenv on CentOS 8
- package:
- name: virtualenv
- state: present
- when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8'
+ - name: Install virtualenv on CentOS 8
+ package:
+ name: virtualenv
+ state: present
+ when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8'
- - name: Install virtualenv on Arch Linux
- pip:
- name: virtualenv
- state: present
- when: ansible_os_family == 'Archlinux'
+ - name: Install virtualenv on Arch Linux
+ pip:
+ name: virtualenv
+ state: present
+ when: ansible_os_family == 'Archlinux'
- - name: install dependencies
- pip:
- name: "{{ item }}"
- virtualenv: "{{ process_venv }}"
- extra_args: "-c {{ remote_constraints }}"
- loop:
- - setuptools==44
- - python-daemon
+ - name: install dependencies
+ pip:
+ name: "{{ item }}"
+ virtualenv: "{{ process_venv }}"
+ extra_args: "-c {{ remote_constraints }}"
+ loop:
+ - setuptools==44
+ - python-daemon
- - name: restart monit
- become: true
- service:
- name: monit
- state: restarted
+ - name: restart monit
+ become: true
+ service:
+ name: monit
+ state: restarted
- - include_tasks: test.yml
+ - include_tasks: test.yml
always:
- - name: stop monit
- become: true
- service:
- name: monit
- state: stopped
+ - name: stop monit
+ become: true
+ service:
+ name: monit
+ state: stopped
- - name: uninstall monit
- become: true
- package:
- name: monit
- state: absent
+ - name: uninstall monit
+ become: true
+ package:
+ name: monit
+ state: absent
- - name: remove process files
- file:
- path: "{{ process_root }}"
- state: absent
+ - name: remove process files
+ file:
+ path: "{{ process_root }}"
+ state: absent
diff --git a/tests/integration/targets/mqtt/tasks/main.yml b/tests/integration/targets/mqtt/tasks/main.yml
index 3fd11643ee..33f9307c5f 100644
--- a/tests/integration/targets/mqtt/tasks/main.yml
+++ b/tests/integration/targets/mqtt/tasks/main.yml
@@ -9,6 +9,6 @@
# SPDX-License-Identifier: GPL-3.0-or-later
- include_tasks: ubuntu.yml
- when:
+ when:
- ansible_distribution == 'Ubuntu'
- ansible_distribution_release not in ['focal', 'jammy', 'noble']
diff --git a/tests/integration/targets/mqtt/tasks/ubuntu.yml b/tests/integration/targets/mqtt/tasks/ubuntu.yml
index 0c0a12d041..332a10dfed 100644
--- a/tests/integration/targets/mqtt/tasks/ubuntu.yml
+++ b/tests/integration/targets/mqtt/tasks/ubuntu.yml
@@ -66,9 +66,9 @@
# port: 8885
# register: result
-#- assert:
-# that:
-# - result is success
+# - assert:
+# that:
+# - result is success
- name: Send a message, client TLS1.1, server (required) TLS1.2 - Expected failure
mqtt:
diff --git a/tests/integration/targets/nomad/files/job.hcl b/tests/integration/targets/nomad/files/job.hcl
index 8f01f04396..58e4de31d5 100644
--- a/tests/integration/targets/nomad/files/job.hcl
+++ b/tests/integration/targets/nomad/files/job.hcl
@@ -36,7 +36,7 @@ job "example" {
#
type = "service"
-
+
# The "constraint" stanza defines additional constraints for placing this job,
# in addition to any resource or driver constraints. This stanza may be placed
# at the "job", "group", or "task" level, and supports variable interpolation.
diff --git a/tests/integration/targets/nomad/tasks/main.yml b/tests/integration/targets/nomad/tasks/main.yml
index 1a143be059..87b1c0474e 100644
--- a/tests/integration/targets/nomad/tasks/main.yml
+++ b/tests/integration/targets/nomad/tasks/main.yml
@@ -17,95 +17,95 @@
nomad_cmd: '{{ remote_tmp_dir }}/nomad'
block:
- - name: Install requests<2.20 (CentOS/RHEL 6)
- pip:
- name: requests<2.20
- extra_args: "-c {{ remote_constraints }}"
- register: result
- until: result is success
- when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
+ - name: Install requests<2.20 (CentOS/RHEL 6)
+ pip:
+ name: requests<2.20
+ extra_args: "-c {{ remote_constraints }}"
+ register: result
+ until: result is success
+ when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
- - name: Install python-nomad
- pip:
- name: python-nomad
- extra_args: "-c {{ remote_constraints }}"
- register: result
- until: result is success
+ - name: Install python-nomad
+ pip:
+ name: python-nomad
+ extra_args: "-c {{ remote_constraints }}"
+ register: result
+ until: result is success
- - name: Install jmespath
- pip:
- name: jmespath
- extra_args: "-c {{ remote_constraints }}"
- register: result
- until: result is success
+ - name: Install jmespath
+ pip:
+ name: jmespath
+ extra_args: "-c {{ remote_constraints }}"
+ register: result
+ until: result is success
- - name: Generate privatekey
- community.crypto.openssl_privatekey:
- path: '{{ remote_tmp_dir }}/privatekey.pem'
+ - name: Generate privatekey
+ community.crypto.openssl_privatekey:
+ path: '{{ remote_tmp_dir }}/privatekey.pem'
- - name: Generate CSR
- community.crypto.openssl_csr:
- path: '{{ remote_tmp_dir }}/csr.csr'
- privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
- subject:
- commonName: localhost
+ - name: Generate CSR
+ community.crypto.openssl_csr:
+ path: '{{ remote_tmp_dir }}/csr.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
+ subject:
+ commonName: localhost
- - name: Generate selfsigned certificate
- register: selfsigned_certificate
- community.crypto.x509_certificate:
- path: '{{ remote_tmp_dir }}/cert.pem'
- csr_path: '{{ remote_tmp_dir }}/csr.csr'
- privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
- provider: selfsigned
- selfsigned_digest: sha256
+ - name: Generate selfsigned certificate
+ register: selfsigned_certificate
+ community.crypto.x509_certificate:
+ path: '{{ remote_tmp_dir }}/cert.pem'
+ csr_path: '{{ remote_tmp_dir }}/csr.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
+ provider: selfsigned
+ selfsigned_digest: sha256
- - name: Install unzip
- package:
- name: unzip
- register: result
- until: result is success
- when: ansible_distribution != "MacOSX"
+ - name: Install unzip
+ package:
+ name: unzip
+ register: result
+ until: result is success
+ when: ansible_distribution != "MacOSX"
- - assert:
- that: ansible_architecture in ['i386', 'x86_64', 'amd64']
+ - assert:
+ that: ansible_architecture in ['i386', 'x86_64', 'amd64']
- - set_fact:
- nomad_arch: '386'
- when: ansible_architecture == 'i386'
+ - set_fact:
+ nomad_arch: '386'
+ when: ansible_architecture == 'i386'
- - set_fact:
- nomad_arch: amd64
- when: ansible_architecture in ['x86_64', 'amd64']
+ - set_fact:
+ nomad_arch: amd64
+ when: ansible_architecture in ['x86_64', 'amd64']
- - name: Download nomad binary
- unarchive:
- src: '{{ nomad_uri }}'
- dest: '{{ remote_tmp_dir }}'
- remote_src: true
- register: result
- until: result is success
+ - name: Download nomad binary
+ unarchive:
+ src: '{{ nomad_uri }}'
+ dest: '{{ remote_tmp_dir }}'
+ remote_src: true
+ register: result
+ until: result is success
- - vars:
- remote_dir: '{{ echo_remote_tmp_dir.stdout }}'
- block:
-
- - command: echo {{ remote_tmp_dir }}
- register: echo_remote_tmp_dir
-
- - name: Run tests integration
+ - vars:
+ remote_dir: '{{ echo_remote_tmp_dir.stdout }}'
block:
- - name: Start nomad (dev mode enabled)
- shell: nohup {{ nomad_cmd }} agent -dev /dev/null 2>&1 &
- - name: wait nomad up
- wait_for:
- host: localhost
- port: 4646
- delay: 10
- timeout: 60
+ - command: echo {{ remote_tmp_dir }}
+ register: echo_remote_tmp_dir
- - import_tasks: nomad_job.yml
- always:
+ - name: Run tests integration
+ block:
+ - name: Start nomad (dev mode enabled)
+ shell: nohup {{ nomad_cmd }} agent -dev /dev/null 2>&1 &
- - name: kill nomad
- shell: pkill nomad
+ - name: wait nomad up
+ wait_for:
+ host: localhost
+ port: 4646
+ delay: 10
+ timeout: 60
+
+ - import_tasks: nomad_job.yml
+ always:
+
+ - name: kill nomad
+ shell: pkill nomad
diff --git a/tests/integration/targets/npm/tasks/main.yml b/tests/integration/targets/npm/tasks/main.yml
index 500e15fdb5..686bd3e434 100644
--- a/tests/integration/targets/npm/tasks/main.yml
+++ b/tests/integration/targets/npm/tasks/main.yml
@@ -16,17 +16,17 @@
- not (ansible_os_family == 'Alpine') # TODO
block:
- # expand remote path
- - command: 'echo {{ remote_tmp_dir }}'
- register: echo
- - set_fact:
- remote_dir: '{{ echo.stdout }}'
+ # expand remote path
+ - command: 'echo {{ remote_tmp_dir }}'
+ register: echo
+ - set_fact:
+ remote_dir: '{{ echo.stdout }}'
- - include_tasks: run.yml
- vars:
- nodejs_version: '{{ item }}'
- nodejs_path: 'node-v{{ nodejs_version }}-{{ ansible_system|lower }}-x{{ ansible_userspace_bits }}'
- with_items:
- - 7.10.1 # provides npm 4.2.0 (last npm < 5 released)
- - 8.0.0 # provides npm 5.0.0
- - 8.2.0 # provides npm 5.3.0 (output change with this version)
+ - include_tasks: run.yml
+ vars:
+ nodejs_version: '{{ item }}'
+ nodejs_path: 'node-v{{ nodejs_version }}-{{ ansible_system|lower }}-x{{ ansible_userspace_bits }}'
+ with_items:
+ - 7.10.1 # provides npm 4.2.0 (last npm < 5 released)
+ - 8.0.0 # provides npm 5.0.0
+ - 8.2.0 # provides npm 5.3.0 (output change with this version)
diff --git a/tests/integration/targets/odbc/aliases b/tests/integration/targets/odbc/aliases
index 0cc7b80d9d..ee15fde5bb 100644
--- a/tests/integration/targets/odbc/aliases
+++ b/tests/integration/targets/odbc/aliases
@@ -13,4 +13,6 @@ skip/rhel9.2
skip/rhel9.3
skip/rhel9.4
skip/rhel9.5
+skip/rhel9.6
+skip/rhel10.0
skip/freebsd
diff --git a/tests/integration/targets/odbc/defaults/main.yml b/tests/integration/targets/odbc/defaults/main.yml
index dd75f54718..45f94e5e2a 100644
--- a/tests/integration/targets/odbc/defaults/main.yml
+++ b/tests/integration/targets/odbc/defaults/main.yml
@@ -20,14 +20,14 @@ packages:
- postgresql-odbc
- unixODBC
- unixODBC-devel
- - gcc
+ - gcc
- gcc-c++
Debian:
- odbc-postgresql
- unixodbc
- unixodbc-dev
- - gcc
- - g++
+ - gcc
+ - g++
Suse:
- psqlODBC
- unixODBC
diff --git a/tests/integration/targets/one_host/tasks/main.yml b/tests/integration/targets/one_host/tasks/main.yml
index 3b2c1cedf3..839cda98e4 100644
--- a/tests/integration/targets/one_host/tasks/main.yml
+++ b/tests/integration/targets/one_host/tasks/main.yml
@@ -69,8 +69,8 @@
- name: "assert test_{{test_number}} failed"
assert:
that:
- - result is failed
- - result.results[0].msg == 'invalid host state ERROR'
+ - result is failed
+ - result.results[0].msg == 'invalid host state ERROR'
# ---
@@ -94,7 +94,7 @@
- name: "assert test_{{test_number}} worked"
assert:
that:
- - result.changed
+ - result.changed
# HOST ENABLEMENT
@@ -120,7 +120,7 @@
- name: "assert test_{{test_number}} worked"
assert:
that:
- - result.changed
+ - result.changed
# TEMPLATE MANAGEMENT
@@ -150,7 +150,7 @@
- name: "assert test_{{test_number}} worked"
assert:
that:
- - result.changed
+ - result.changed
# ---
@@ -180,7 +180,7 @@
- name: "assert test_{{test_number}} worked"
assert:
that:
- - result is not changed
+ - result is not changed
# HOST DISABLEMENT
@@ -205,7 +205,7 @@
- name: "assert test_{{test_number}} worked"
assert:
that:
- - result.changed
+ - result.changed
# HOST OFFLINE
@@ -230,7 +230,7 @@
- name: "assert test_{{test_number}} worked"
assert:
that:
- - result.changed
+ - result.changed
# TEARDOWN
diff --git a/tests/integration/targets/one_image/tasks/main.yml b/tests/integration/targets/one_image/tasks/main.yml
index c8736d73d8..aea8501aa7 100644
--- a/tests/integration/targets/one_image/tasks/main.yml
+++ b/tests/integration/targets/one_image/tasks/main.yml
@@ -166,7 +166,7 @@
- name: Assert that image was deleted
assert:
that:
- - result is changed
+ - result is changed
# Trying to run with wrong arguments
- name: Try to use name and ID at the same time
@@ -182,7 +182,7 @@
- name: Assert that task failed
assert:
that:
- - result is failed
+ - result is failed
- name: Try to rename image without specifying new name
one_image:
@@ -197,7 +197,7 @@
- name: Assert that task failed
assert:
that:
- - result is failed
+ - result is failed
- name: Try to rename image without specifying new name
one_image:
diff --git a/tests/integration/targets/one_image_info/tasks/main.yml b/tests/integration/targets/one_image_info/tasks/main.yml
index fede116241..00aacaa295 100644
--- a/tests/integration/targets/one_image_info/tasks/main.yml
+++ b/tests/integration/targets/one_image_info/tasks/main.yml
@@ -27,7 +27,7 @@
api_url: "{{ opennebula_url }}"
api_username: "{{ opennebula_username }}"
api_password: "{{ opennebula_password }}"
- ids:
+ ids:
- 2
- 2
- 8
diff --git a/tests/integration/targets/one_template/tasks/main.yml b/tests/integration/targets/one_template/tasks/main.yml
index 58bca9c6c5..0532c16107 100644
--- a/tests/integration/targets/one_template/tasks/main.yml
+++ b/tests/integration/targets/one_template/tasks/main.yml
@@ -70,7 +70,7 @@
- name: "assert that creation worked"
assert:
that:
- - result is changed
+ - result is changed
# Updating a template
@@ -123,7 +123,7 @@
- name: "assert that it updated the template"
assert:
that:
- - result is changed
+ - result is changed
- name: "Update an existing TEMPLATE with the same changes again"
one_template:
@@ -173,7 +173,7 @@
- name: "assert that there was no change"
assert:
that:
- - result is not changed
+ - result is not changed
# Deletion of templates
@@ -195,7 +195,7 @@
- name: "assert that there was no change"
assert:
that:
- - result is not changed
+ - result is not changed
- name: "Delete an existing TEMPLATE"
one_template:
@@ -214,7 +214,7 @@
- name: "assert that there was a change"
assert:
that:
- - result is changed
+ - result is changed
# Usage without `template` parameter
@@ -232,7 +232,7 @@
- name: "assert that it failed because template is missing"
assert:
that:
- - result is failed
+ - result is failed
# TEARDOWN
diff --git a/tests/integration/targets/one_vnet/tasks/main.yml b/tests/integration/targets/one_vnet/tasks/main.yml
index 084d4758ad..9e1164fcd7 100644
--- a/tests/integration/targets/one_vnet/tasks/main.yml
+++ b/tests/integration/targets/one_vnet/tasks/main.yml
@@ -112,7 +112,7 @@
- name: Assert that network was deleted
assert:
that:
- - result is changed
+ - result is changed
# Trying to run with wrong arguments
- name: Try to create use network with state=present and without the template parameter
@@ -161,7 +161,7 @@
api_url: "{{ opennebula_url }}"
api_username: "{{ opennebula_username }}"
api_password: "{{ opennebula_password }}"
- name:
+ name:
id: 0
state: present
register: result
diff --git a/tests/integration/targets/osx_defaults/tasks/main.yml b/tests/integration/targets/osx_defaults/tasks/main.yml
index 3ca3180f04..780c3f96c5 100644
--- a/tests/integration/targets/osx_defaults/tasks/main.yml
+++ b/tests/integration/targets/osx_defaults/tasks/main.yml
@@ -21,7 +21,7 @@
- name: Test if state and value are required together
assert:
that:
- - "'following are missing: value' in missing_value['msg']"
+ - "'following are missing: value' in missing_value['msg']"
- name: Change value of AppleMeasurementUnits to centimeter in check_mode
osx_defaults:
@@ -36,7 +36,7 @@
- name: Test if AppleMeasurementUnits value is changed to Centimeters in check_mode
assert:
that:
- - measure_task_check_mode.changed
+ - measure_task_check_mode.changed
- name: Find the current value of AppleMeasurementUnits
osx_defaults:
@@ -68,7 +68,7 @@
- name: Test if AppleMeasurementUnits value is changed to {{ new_value }}
assert:
that:
- - change_value.changed
+ - change_value.changed
- name: Again change value of AppleMeasurementUnits to {{ new_value }}
osx_defaults:
@@ -82,7 +82,7 @@
- name: Again test if AppleMeasurementUnits value is not changed to {{ new_value }}
assert:
that:
- - not change_value.changed
+ - not change_value.changed
- name: Check a fake setting for delete operation
osx_defaults:
diff --git a/tests/integration/targets/pids/tasks/main.yml b/tests/integration/targets/pids/tasks/main.yml
index c8feaacf3e..de4a2e406b 100644
--- a/tests/integration/targets/pids/tasks/main.yml
+++ b/tests/integration/targets/pids/tasks/main.yml
@@ -27,8 +27,8 @@
- name: "Verify that the list of Process IDs (PIDs) returned is empty"
assert:
that:
- - emptypids is not changed
- - emptypids.pids == []
+ - emptypids is not changed
+ - emptypids.pids == []
- name: "Picking a random process name"
set_fact:
@@ -38,7 +38,7 @@
copy:
src: sleeper.c
dest: "{{ remote_tmp_dir }}/sleeper.c"
- mode: 0644
+ mode: "0644"
- name: Compile fake 'sleep' binary
command: cc {{ remote_tmp_dir }}/sleeper.c -o {{ remote_tmp_dir }}/{{ random_name }}
@@ -47,7 +47,7 @@
template:
src: obtainpid.sh.j2
dest: "{{ remote_tmp_dir }}/obtainpid.sh"
- mode: 0755
+ mode: "0755"
- name: "Run the fake 'sleep' binary"
command: sh {{ remote_tmp_dir }}/obtainpid.sh
@@ -86,7 +86,7 @@
- name: "Reading pid from the file"
slurp:
- src: "{{ remote_tmp_dir }}/obtainpid.txt"
+ src: "{{ remote_tmp_dir }}/obtainpid.txt"
register: newpid
- name: Gather all processes to make debugging easier
@@ -101,12 +101,12 @@
- name: "Verify that the Process IDs (PIDs) returned is not empty and also equal to the PIDs obtained in console"
assert:
that:
- - "pids.pids | join(' ') == newpid.content | b64decode | trim"
- - "pids.pids | length > 0"
- - "exactpidmatch.pids == []"
- - "pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim"
- - "caseinsensitive_pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim"
- - newpid.content | b64decode | trim | int in match_all.pids
+ - "pids.pids | join(' ') == newpid.content | b64decode | trim"
+ - "pids.pids | length > 0"
+ - "exactpidmatch.pids == []"
+ - "pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim"
+ - "caseinsensitive_pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim"
+ - newpid.content | b64decode | trim | int in match_all.pids
- name: "Register output of bad input pattern"
pids:
diff --git a/tests/integration/targets/pipx/tasks/main.yml b/tests/integration/targets/pipx/tasks/main.yml
index 04086d80cd..6b83fa7335 100644
--- a/tests/integration/targets/pipx/tasks/main.yml
+++ b/tests/integration/targets/pipx/tasks/main.yml
@@ -3,22 +3,29 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-- name: Determine pipx level
- block:
- - name: Install pipx>=1.7.0
- pip:
- name: pipx>=1.7.0
- - name: Set has_pipx170 fact true
- ansible.builtin.set_fact:
- has_pipx170: true
- rescue:
- - name: Set has_pipx170 fact false
- ansible.builtin.set_fact:
- has_pipx170: false
- - name: Install pipx (no version spec)
- pip:
- name: pipx
+- name: Bail out if Python < 3.8
+ when: ansible_python_version is version('3.8', '<')
+ ansible.builtin.meta: end_play
+- name: Install pipx>=1.7.0
+ ansible.builtin.pip:
+ name: pipx>=1.7.0
+ extra_args: --user
+- name: Determine packaging level
+ block:
+ - name: Install packaging>=22.0
+ ansible.builtin.pip:
+ name: packaging>=22.0
+ - name: Set has_packaging22 fact true
+ ansible.builtin.set_fact:
+ has_packaging22: true
+ rescue:
+ - name: Set has_packaging22 fact false
+ ansible.builtin.set_fact:
+ has_packaging22: false
+ - name: Install has_packaging (no version spec)
+ ansible.builtin.pip:
+ name: packaging
##############################################################################
- name: ensure application tox is uninstalled
@@ -164,7 +171,7 @@
community.general.pipx:
state: absent
name: tox
- register: uninstall_tox_latest
+ register: uninstall_tox_1
- name: install application tox 3.24.0 for latest
community.general.pipx:
@@ -208,26 +215,55 @@
community.general.pipx:
state: absent
name: tox
- register: uninstall_tox_again
+ register: uninstall_tox_2
+
+- name: install tox with dependency group 'docs'
+ community.general.pipx:
+ name: tox
+ source: tox[docs]
+ state: latest
+ register: install_tox_latest_docs
+
+- name: install tox with dependency group 'docs' again
+ community.general.pipx:
+ name: tox
+ source: tox[docs]
+ state: latest
+ register: install_tox_latest_docs_again
+
+- name: cleanup tox latest yet again
+ community.general.pipx:
+ state: absent
+ name: tox
+ register: uninstall_tox_3
- name: check assertions tox latest
assert:
that:
- install_tox_latest is changed
- - uninstall_tox_latest is changed
+ - "'tox' in install_tox_latest.application"
+ - install_tox_latest.application.tox.version != '3.24.0'
+ - uninstall_tox_1 is changed
+ - "'tox' not in uninstall_tox_1.application"
- install_tox_324_for_latest is changed
+ - "'tox' in install_tox_324_for_latest.application"
- install_tox_324_for_latest.application.tox.version == '3.24.0'
- install_tox_latest_with_preinstall is changed
- - install_tox_latest_with_preinstall.application.tox.version == latest_tox_version
+ - "'tox' in install_tox_latest_with_preinstall.application"
+ - install_tox_latest_with_preinstall.application.tox.version != '3.24.0'
- install_tox_latest_with_preinstall_again is not changed
- - install_tox_latest_with_preinstall_again.application.tox.version == latest_tox_version
- install_tox_latest_with_preinstall_again_force is changed
- - install_tox_latest_with_preinstall_again_force.application.tox.version == latest_tox_version
- - uninstall_tox_latest_again is changed
- - install_tox_with_deps is changed
- - install_tox_with_deps.application.tox.version == latest_tox_version
- - uninstall_tox_again is changed
- - "'tox' not in uninstall_tox_again.application"
+ - uninstall_tox_2 is changed
+ - "'tox' not in uninstall_tox_2.application"
+ - install_tox_latest_docs is changed
+ - install_tox_latest_docs_again is not changed
+ - uninstall_tox_3 is changed
+ - "'tox' not in uninstall_tox_3.application"
+
+##############################################################################
+# Test version specifiers in name parameter
+- name: Run version specifier tests
+ ansible.builtin.include_tasks: testcase-10031-version-specs.yml
##############################################################################
@@ -246,27 +282,23 @@
- name: Include testcase for issue 8656
ansible.builtin.include_tasks: testcase-8656.yml
-- name: Recent features
- when:
- - has_pipx170
- block:
- - name: Include testcase for PR 8793 --global
- ansible.builtin.include_tasks: testcase-8793-global.yml
+- name: Include testcase for PR 8793 --global
+ ansible.builtin.include_tasks: testcase-8793-global.yml
- - name: Include testcase for PR 8809 install-all
- ansible.builtin.include_tasks: testcase-8809-installall.yml
+- name: Include testcase for PR 8809 install-all
+ ansible.builtin.include_tasks: testcase-8809-installall.yml
- - name: Include testcase for PR 8809 pin
- ansible.builtin.include_tasks: testcase-8809-pin.yml
+- name: Include testcase for PR 8809 pin
+ ansible.builtin.include_tasks: testcase-8809-pin.yml
- - name: Include testcase for PR 8809 injectpkg
- ansible.builtin.include_tasks: testcase-8809-uninjectpkg.yml
+- name: Include testcase for PR 8809 injectpkg
+ ansible.builtin.include_tasks: testcase-8809-uninjectpkg.yml
- - name: Include testcase for PR 9009 injectpkg --global
- ansible.builtin.include_tasks: testcase-9009-fixglobal.yml
+- name: Include testcase for PR 9009 injectpkg --global
+ ansible.builtin.include_tasks: testcase-9009-fixglobal.yml
- - name: Include testcase for PR 9103 upgrade --global
- ansible.builtin.include_tasks: testcase-9103-upgrade-global.yml
+- name: Include testcase for PR 9103 upgrade --global
+ ansible.builtin.include_tasks: testcase-9103-upgrade-global.yml
- - name: Include testcase for issue 9619 latest --global
- ansible.builtin.include_tasks: testcase-9619-latest-global.yml
+- name: Include testcase for issue 9619 latest --global
+ ansible.builtin.include_tasks: testcase-9619-latest-global.yml
diff --git a/tests/integration/targets/pipx/tasks/testcase-10031-version-specs.yml b/tests/integration/targets/pipx/tasks/testcase-10031-version-specs.yml
new file mode 100644
index 0000000000..e018720bd5
--- /dev/null
+++ b/tests/integration/targets/pipx/tasks/testcase-10031-version-specs.yml
@@ -0,0 +1,83 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+##############################################################################
+# Test version specifiers in name parameter
+
+- name: Ensure tox is uninstalled
+ community.general.pipx:
+ state: absent
+ name: tox
+ register: uninstall_tox
+
+- name: Install tox with version specifier in name
+ community.general.pipx:
+ name: tox>=3.22.0,<3.27.0
+ register: install_tox_version
+
+- name: Install tox with same version specifier (idempotency check)
+ community.general.pipx:
+ name: tox>=3.22.0,<3.27.0
+ register: install_tox_version_again
+
+- name: Ensure tox is uninstalled again
+ community.general.pipx:
+ state: absent
+ name: tox
+
+- name: Install tox with extras and version
+ community.general.pipx:
+ name: "tox[testing]>=3.22.0,<3.27.0"
+ register: install_tox_extras
+ ignore_errors: true # Some versions might not have this extra
+
+- name: Install tox with higher version specifier
+ community.general.pipx:
+ name: "tox>=3.27.0"
+ register: install_tox_higher_version
+
+- name: Install tox with higher version specifier (force)
+ community.general.pipx:
+ name: "tox>=3.27.0"
+ force: true
+ register: install_tox_higher_version_force
+
+- name: Cleanup tox
+ community.general.pipx:
+ state: absent
+ name: tox
+ register: uninstall_tox_final
+
+- name: Check version specifier assertions
+ assert:
+ that:
+ - install_tox_version is changed
+ - "'tox' in install_tox_version.application"
+ - "install_tox_version.application.tox.version is version('3.22.0', '>=')"
+ - "install_tox_version.application.tox.version is version('3.27.0', '<')"
+ - install_tox_version_again is not changed
+ - "'tox' in install_tox_extras.application"
+ - "install_tox_extras.application.tox.version is version('3.22.0', '>=')"
+ - "install_tox_extras.application.tox.version is version('3.27.0', '<')"
+ - install_tox_higher_version is changed
+ - install_tox_higher_version_force is changed
+ - uninstall_tox_final is changed
+ - "'tox' not in uninstall_tox_final.application"
+
+- name: If packaging is recent
+ when:
+ - has_packaging22
+ block:
+ - name: Install tox with invalid version specifier
+ community.general.pipx:
+ name: "tox>>>>>3.27.0"
+ register: install_tox_invalid
+ ignore_errors: true
+
+ - name: Check version specifier assertions
+ assert:
+ that:
+ - install_tox_invalid is failed
+ - "'Invalid package specification' in install_tox_invalid.msg"
diff --git a/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml b/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml
index 1db3e60406..812cd9bd74 100644
--- a/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml
+++ b/tests/integration/targets/pipx/tasks/testcase-oldsitewide.yml
@@ -7,7 +7,7 @@
ansible.builtin.file:
path: /opt/pipx
state: directory
- mode: 0755
+ mode: "0755"
- name: Install tox site-wide
community.general.pipx:
diff --git a/tests/integration/targets/pipx_info/tasks/main.yml b/tests/integration/targets/pipx_info/tasks/main.yml
index e3de105d6f..d51ce1b33e 100644
--- a/tests/integration/targets/pipx_info/tasks/main.yml
+++ b/tests/integration/targets/pipx_info/tasks/main.yml
@@ -3,47 +3,53 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-- name: install pipx
- pip:
- name: pipx
+- name: Bail out if Python < 3.8
+ when: ansible_python_version is version('3.8', '<')
+ ansible.builtin.meta: end_play
+- name: Install pipx>=1.7.0
+ ansible.builtin.pip:
+ name: pipx>=1.7.0
extra_args: --user
##############################################################################
-- name: ensure application tox is uninstalled
+- name: Ensure applications are uninstalled
community.general.pipx:
+ name: "{{ item }}"
state: absent
- name: tox
+ loop:
+ - tox
+ - pylint
-- name: retrieve applications (empty)
+- name: Retrieve applications (empty)
community.general.pipx_info: {}
register: info_empty
-- name: install application tox
+- name: Install application tox
community.general.pipx:
name: tox
-- name: retrieve applications
+- name: Retrieve applications
community.general.pipx_info: {}
register: info_all
-- name: retrieve applications (include_deps=true)
+- name: Retrieve applications (include_deps=true)
community.general.pipx_info:
include_deps: true
register: info_all_deps
-- name: retrieve application tox
+- name: Retrieve application tox
community.general.pipx_info:
name: tox
include_deps: true
register: info_tox
-- name: uninstall application tox
+- name: Uninstall application tox
community.general.pipx:
state: absent
name: tox
-- name: check assertions tox
- assert:
+- name: Check assertions tox
+ ansible.builtin.assert:
that:
- info_empty.application|length == 0
@@ -63,8 +69,8 @@
- info_tox.application == info_all_deps.application
##############################################################################
-- name: set test applications
- set_fact:
+- name: Set test applications
+ ansible.builtin.set_fact:
apps:
- name: tox
source: tox==3.24.0
@@ -72,19 +78,19 @@
inject_packages:
- licenses
-- name: ensure applications are uninstalled
+- name: Ensure applications are uninstalled
community.general.pipx:
name: "{{ item.name }}"
state: absent
loop: "{{ apps }}"
-- name: install applications
+- name: Install applications
community.general.pipx:
name: "{{ item.name }}"
source: "{{ item.source | default(omit) }}"
loop: "{{ apps }}"
-- name: inject packages
+- name: Inject packages
community.general.pipx:
state: inject
name: "{{ item.name }}"
@@ -92,31 +98,31 @@
when: "'inject_packages' in item"
loop: "{{ apps }}"
-- name: retrieve applications
+- name: Retrieve applications
community.general.pipx_info: {}
register: info2_all
-- name: retrieve applications (include_deps=true)
+- name: Retrieve applications (include_deps=true)
community.general.pipx_info:
include_deps: true
include_injected: true
register: info2_all_deps
-- name: retrieve application pylint
+- name: Retrieve application pylint
community.general.pipx_info:
name: pylint
include_deps: true
include_injected: true
register: info2_lint
-- name: ensure applications are uninstalled
+- name: Ensure applications are uninstalled
community.general.pipx:
name: "{{ item.name }}"
state: absent
loop: "{{ apps }}"
-- name: check assertions multiple apps
- assert:
+- name: Check assertions multiple apps
+ ansible.builtin.assert:
that:
- all_apps|length == 2
- all_apps[1].name == "tox"
@@ -135,6 +141,6 @@
- all_apps_deps|length == 2
- lint[0] == all_apps_deps[0]
vars:
- all_apps: "{{ info2_all.application|sort(attribute='name') }}"
+ all_apps: "{{ info2_all.application | sort(attribute='name') }}"
all_apps_deps: "{{ info2_all_deps.application | sort(attribute='name') }}"
lint: "{{ info2_lint.application | sort(attribute='name') }}"
diff --git a/tests/integration/targets/pkgng/tasks/freebsd.yml b/tests/integration/targets/pkgng/tasks/freebsd.yml
index 2dcd7b02a8..b51ad836bd 100644
--- a/tests/integration/targets/pkgng/tasks/freebsd.yml
+++ b/tests/integration/targets/pkgng/tasks/freebsd.yml
@@ -534,12 +534,14 @@
#
# NOTE: FreeBSD 14.2 fails as well (someone with FreeBSD knowledge has to take a look)
#
+ # NOTE: FreeBSD 14.3 fails as well (someone with FreeBSD knowledge has to take a look)
+ #
# See also
# https://github.com/ansible-collections/community.general/issues/5795
when: >-
(ansible_distribution_version is version('12.01', '>=') and ansible_distribution_version is version('12.3', '<'))
or (ansible_distribution_version is version('13.6', '>=') and ansible_distribution_version is version('14.0', '<'))
- or ansible_distribution_version is version('14.3', '>=')
+ or ansible_distribution_version is version('14.4', '>=')
block:
- name: Setup testjail
include_tasks: setup-testjail.yml
diff --git a/tests/integration/targets/pkgutil/tasks/main.yml b/tests/integration/targets/pkgutil/tasks/main.yml
index 8ceb4adcc3..e7f665efbf 100644
--- a/tests/integration/targets/pkgutil/tasks/main.yml
+++ b/tests/integration/targets/pkgutil/tasks/main.yml
@@ -24,7 +24,7 @@
- name: Verify cm_add_package
assert:
that:
- - cm_add_package is changed
+ - cm_add_package is changed
- name: Add package (normal mode)
pkgutil:
@@ -35,7 +35,7 @@
- name: Verify nm_add_package
assert:
that:
- - nm_add_package is changed
+ - nm_add_package is changed
- name: Add package again (check_mode)
pkgutil:
@@ -47,7 +47,7 @@
- name: Verify cm_add_package_again
assert:
that:
- - cm_add_package_again is not changed
+ - cm_add_package_again is not changed
- name: Add package again (normal mode)
pkgutil:
@@ -58,7 +58,7 @@
- name: Verify nm_add_package_again
assert:
that:
- - nm_add_package_again is not changed
+ - nm_add_package_again is not changed
# REMOVE PACKAGE
@@ -72,7 +72,7 @@
- name: Verify cm_remove_package
assert:
that:
- - cm_remove_package is changed
+ - cm_remove_package is changed
- name: Remove package (normal mode)
pkgutil:
@@ -83,7 +83,7 @@
- name: Verify nm_remove_package
assert:
that:
- - nm_remove_package is changed
+ - nm_remove_package is changed
- name: Remove package again (check_mode)
pkgutil:
@@ -95,7 +95,7 @@
- name: Verify cm_remove_package_again
assert:
that:
- - cm_remove_package_again is not changed
+ - cm_remove_package_again is not changed
- name: Remove package again (normal mode)
pkgutil:
@@ -106,7 +106,7 @@
- name: Verify nm_remove_package_again
assert:
that:
- - nm_remove_package_again is not changed
+ - nm_remove_package_again is not changed
# RESTORE ENVIRONMENT
diff --git a/tests/integration/targets/proxmox/tasks/main.yml b/tests/integration/targets/proxmox/tasks/main.yml
deleted file mode 100644
index 4e393cbf3b..0000000000
--- a/tests/integration/targets/proxmox/tasks/main.yml
+++ /dev/null
@@ -1,616 +0,0 @@
-####################################################################
-# WARNING: These are designed specifically for Ansible tests #
-# and should not be used as examples of how to write Ansible roles #
-####################################################################
-
-# Copyright (c) 2020, Tristan Le Guern
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- name: List domains
- proxmox_domain_info:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- register: results
-
-- assert:
- that:
- - results is not changed
- - results.proxmox_domains is defined
-
-- name: Retrieve info about pve
- proxmox_domain_info:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- domain: pve
- register: results
-
-- assert:
- that:
- - results is not changed
- - results.proxmox_domains is defined
- - results.proxmox_domains|length == 1
- - results.proxmox_domains[0].type == 'pve'
-
-- name: List groups
- proxmox_group_info:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- register: results
-
-- assert:
- that:
- - results is not changed
- - results.proxmox_groups is defined
-
-- name: List users
- proxmox_user_info:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- register: results
-
-- assert:
- that:
- - results is not changed
- - results.proxmox_users is defined
-
-- name: Retrieve info about api_user using name and domain
- proxmox_user_info:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- user: "{{ user }}"
- domain: "{{ domain }}"
- register: results_user_domain
-
-- assert:
- that:
- - results_user_domain is not changed
- - results_user_domain.proxmox_users is defined
- - results_user_domain.proxmox_users|length == 1
- - results_user_domain.proxmox_users[0].domain == "{{ domain }}"
- - results_user_domain.proxmox_users[0].user == "{{ user }}"
- - results_user_domain.proxmox_users[0].userid == "{{ user }}@{{ domain }}"
-
-- name: Retrieve info about api_user using userid
- proxmox_user_info:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- userid: "{{ user }}@{{ domain }}"
- register: results_userid
-
-- assert:
- that:
- - results_userid is not changed
- - results_userid.proxmox_users is defined
- - results_userid.proxmox_users|length == 1
- - results_userid.proxmox_users[0].domain == "{{ domain }}"
- - results_userid.proxmox_users[0].user == "{{ user }}"
- - results_userid.proxmox_users[0].userid == "{{ user }}@{{ domain }}"
-
-- name: Retrieve info about storage
- proxmox_storage_info:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- storage: "{{ storage }}"
- register: results_storage
-
-- assert:
- that:
- - results_storage is not changed
- - results_storage.proxmox_storages is defined
- - results_storage.proxmox_storages|length == 1
- - results_storage.proxmox_storages[0].storage == "{{ storage }}"
-
-- name: List content on storage
- proxmox_storage_contents_info:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- storage: "{{ storage }}"
- node: "{{ node }}"
- content: images
- register: results_list_storage
-
-- assert:
- that:
- - results_storage is not changed
- - results_storage.proxmox_storage_content is defined
- - results_storage.proxmox_storage_content |length == 1
-
-- name: VM creation
- tags: [ 'create' ]
- block:
- - name: Create test vm test-instance
- proxmox_kvm:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- node: "{{ node }}"
- storage: "{{ storage }}"
- vmid: "{{ from_vmid }}"
- name: test-instance
- clone: 'yes'
- state: present
- tags:
- - TagWithUppercaseChars
- timeout: 500
- register: results_kvm
-
- - set_fact:
- vmid: "{{ results_kvm.msg.split(' ')[-7] }}"
-
- - assert:
- that:
- - results_kvm is changed
- - results_kvm.vmid == from_vmid
- - results_kvm.msg == "VM test-instance with newid {{ vmid }} cloned from vm with vmid {{ from_vmid }}"
-
- - pause:
- seconds: 30
-
-- name: VM start
- tags: [ 'start' ]
- block:
- - name: Start test VM
- proxmox_kvm:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- node: "{{ node }}"
- vmid: "{{ vmid }}"
- state: started
- register: results_action_start
-
- - assert:
- that:
- - results_action_start is changed
- - results_action_start.status == 'stopped'
- - results_action_start.vmid == {{ vmid }}
- - results_action_start.msg == "VM {{ vmid }} started"
-
- - pause:
- seconds: 90
-
- - name: Try to start test VM again
- proxmox_kvm:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- node: "{{ node }}"
- vmid: "{{ vmid }}"
- state: started
- register: results_action_start_again
-
- - assert:
- that:
- - results_action_start_again is not changed
- - results_action_start_again.status == 'running'
- - results_action_start_again.vmid == {{ vmid }}
- - results_action_start_again.msg == "VM {{ vmid }} is already running"
-
- - name: Check current status
- proxmox_kvm:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- node: "{{ node }}"
- vmid: "{{ vmid }}"
- state: current
- register: results_action_current
-
- - assert:
- that:
- - results_action_current is not changed
- - results_action_current.status == 'running'
- - results_action_current.vmid == {{ vmid }}
- - results_action_current.msg == "VM test-instance with vmid = {{ vmid }} is running"
-
-- name: VM add/change/delete NIC
- tags: [ 'nic' ]
- block:
- - name: Add NIC to test VM
- proxmox_nic:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- vmid: "{{ vmid }}"
- state: present
- interface: net5
- bridge: vmbr0
- tag: 42
- register: results
-
- - assert:
- that:
- - results is changed
- - results.vmid == {{ vmid }}
- - results.msg == "Nic net5 updated on VM with vmid {{ vmid }}"
-
- - name: Update NIC no changes
- proxmox_nic:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- vmid: "{{ vmid }}"
- state: present
- interface: net5
- bridge: vmbr0
- tag: 42
- register: results
-
- - assert:
- that:
- - results is not changed
- - results.vmid == {{ vmid }}
- - results.msg == "Nic net5 unchanged on VM with vmid {{ vmid }}"
-
- - name: Update NIC with changes
- proxmox_nic:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- vmid: "{{ vmid }}"
- state: present
- interface: net5
- bridge: vmbr0
- tag: 24
- firewall: true
- register: results
-
- - assert:
- that:
- - results is changed
- - results.vmid == {{ vmid }}
- - results.msg == "Nic net5 updated on VM with vmid {{ vmid }}"
-
- - name: Delete NIC
- proxmox_nic:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- vmid: "{{ vmid }}"
- state: absent
- interface: net5
- register: results
-
- - assert:
- that:
- - results is changed
- - results.vmid == {{ vmid }}
- - results.msg == "Nic net5 deleted on VM with vmid {{ vmid }}"
-
-- name: Create new disk in VM
- tags: ['create_disk']
- block:
- - name: Add new disk (without force) to VM
- proxmox_disk:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- vmid: "{{ vmid }}"
- disk: "{{ disk }}"
- storage: "{{ storage }}"
- size: 1
- state: present
- register: results
-
- - assert:
- that:
- - results is changed
- - results.vmid == {{ vmid }}
- - results.msg == "Disk {{ disk }} created in VM {{ vmid }}"
-
- - name: Try add disk again with same options (expect no-op)
- proxmox_disk:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- vmid: "{{ vmid }}"
- disk: "{{ disk }}"
- storage: "{{ storage }}"
- size: 1
- state: present
- register: results
-
- - assert:
- that:
- - results is not changed
- - results.vmid == {{ vmid }}
- - results.msg == "Disk {{ disk }} is up to date in VM {{ vmid }}"
-
- - name: Add new disk replacing existing disk (detach old and leave unused)
- proxmox_disk:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- vmid: "{{ vmid }}"
- disk: "{{ disk }}"
- storage: "{{ storage }}"
- size: 2
- create: forced
- state: present
- register: results
-
- - assert:
- that:
- - results is changed
- - results.vmid == {{ vmid }}
- - results.msg == "Disk {{ disk }} created in VM {{ vmid }}"
-
-- name: Update existing disk in VM
- tags: ['update_disk']
- block:
- - name: Update disk configuration
- proxmox_disk:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- vmid: "{{ vmid }}"
- disk: "{{ disk }}"
- backup: false
- ro: true
- aio: native
- state: present
- register: results
-
- - assert:
- that:
- - results is changed
- - results.vmid == {{ vmid }}
- - results.msg == "Disk {{ disk }} updated in VM {{ vmid }}"
-
-- name: Grow existing disk in VM
- tags: ['grow_disk']
- block:
- - name: Increase disk size
- proxmox_disk:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- vmid: "{{ vmid }}"
- disk: "{{ disk }}"
- size: +1G
- state: resized
- register: results
-
- - assert:
- that:
- - results is changed
- - results.vmid == {{ vmid }}
- - results.msg == "Disk {{ disk }} resized in VM {{ vmid }}"
-
-- name: Detach disk and leave it unused
- tags: ['detach_disk']
- block:
- - name: Detach disk
- proxmox_disk:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- vmid: "{{ vmid }}"
- disk: "{{ disk }}"
- state: detached
- register: results
-
- - assert:
- that:
- - results is changed
- - results.vmid == {{ vmid }}
- - results.msg == "Disk {{ disk }} detached from VM {{ vmid }}"
-
-- name: Move disk to another storage or another VM
- tags: ['move_disk']
- block:
- - name: Move disk to another storage inside same VM
- proxmox_disk:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- vmid: "{{ vmid }}"
- disk: "{{ disk }}"
- target_storage: "{{ target_storage }}"
- format: "{{ target_format }}"
- state: moved
- register: results
-
- - assert:
- that:
- - results is changed
- - results.vmid == {{ vmid }}
- - results.msg == "Disk {{ disk }} moved from VM {{ vmid }} storage {{ results.storage }}"
-
- - name: Move disk to another VM (same storage)
- proxmox_disk:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- vmid: "{{ vmid }}"
- disk: "{{ disk }}"
- target_vmid: "{{ target_vm }}"
- target_disk: "{{ target_disk }}"
- state: moved
- register: results
-
- - assert:
- that:
- - results is changed
- - results.vmid == {{ vmid }}
- - results.msg == "Disk {{ disk }} moved from VM {{ vmid }} storage {{ results.storage }}"
-
-
-- name: Remove disk permanently
- tags: ['remove_disk']
- block:
- - name: Remove disk
- proxmox_disk:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- vmid: "{{ target_vm }}"
- disk: "{{ target_disk }}"
- state: absent
- register: results
-
- - assert:
- that:
- - results is changed
- - results.vmid == {{ target_vm }}
- - results.msg == "Disk {{ target_disk }} removed from VM {{ target_vm }}"
-
-- name: VM stop
- tags: [ 'stop' ]
- block:
- - name: Stop test VM
- proxmox_kvm:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- node: "{{ node }}"
- vmid: "{{ vmid }}"
- state: stopped
- register: results_action_stop
-
- - assert:
- that:
- - results_action_stop is changed
- - results_action_stop.status == 'running'
- - results_action_stop.vmid == {{ vmid }}
- - results_action_stop.msg == "VM {{ vmid }} is shutting down"
-
- - pause:
- seconds: 5
-
- - name: Check current status again
- proxmox_kvm:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- node: "{{ node }}"
- vmid: "{{ vmid }}"
- state: current
- register: results_action_current
-
- - assert:
- that:
- - results_action_current is not changed
- - results_action_current.status == 'stopped'
- - results_action_current.vmid == {{ vmid }}
- - results_action_current.msg == "VM test-instance with vmid = {{ vmid }} is stopped"
-
-- name: VM destroy
- tags: [ 'destroy' ]
- block:
- - name: Destroy test VM
- proxmox_kvm:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- node: "{{ node }}"
- vmid: "{{ vmid }}"
- state: absent
- register: results_kvm_destroy
-
- - assert:
- that:
- - results_kvm_destroy is changed
- - results_kvm_destroy.vmid == {{ vmid }}
- - results_kvm_destroy.msg == "VM {{ vmid }} removed"
-
-- name: Retrieve information about nodes
- proxmox_node_info:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- register: results
-
-- assert:
- that:
- - results is not changed
- - results.proxmox_nodes is defined
- - results.proxmox_nodes|length >= 1
- - results.proxmox_nodes[0].type == 'node'
diff --git a/tests/integration/targets/proxmox_pool/tasks/main.yml b/tests/integration/targets/proxmox_pool/tasks/main.yml
deleted file mode 100644
index 2b22960f2c..0000000000
--- a/tests/integration/targets/proxmox_pool/tasks/main.yml
+++ /dev/null
@@ -1,220 +0,0 @@
-####################################################################
-# WARNING: These are designed specifically for Ansible tests #
-# and should not be used as examples of how to write Ansible roles #
-####################################################################
-
-# Copyright (c) 2023, Sergei Antipov
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- name: Proxmox VE pool and pool membership management
- tags: ["pool"]
- block:
- - name: Make sure poolid parameter is not missing
- proxmox_pool:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- ignore_errors: true
- register: result
-
- - assert:
- that:
- - result is failed
- - "'missing required arguments: poolid' in result.msg"
-
- - name: Create pool (Check)
- proxmox_pool:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- poolid: "{{ poolid }}"
- check_mode: true
- register: result
-
- - assert:
- that:
- - result is changed
- - result is success
-
- - name: Create pool
- proxmox_pool:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- poolid: "{{ poolid }}"
- register: result
-
- - assert:
- that:
- - result is changed
- - result is success
- - result.poolid == "{{ poolid }}"
-
- - name: Delete pool (Check)
- proxmox_pool:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- poolid: "{{ poolid }}"
- state: absent
- check_mode: true
- register: result
-
- - assert:
- that:
- - result is changed
- - result is success
-
- - name: Delete non-existing pool should do nothing
- proxmox_pool:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- poolid: "non-existing-poolid"
- state: absent
- register: result
-
- - assert:
- that:
- - result is not changed
- - result is success
-
- - name: Deletion of non-empty pool fails
- block:
- - name: Add storage into pool
- proxmox_pool_member:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- poolid: "{{ poolid }}"
- member: "{{ member }}"
- type: "{{ member_type }}"
- diff: true
- register: result
-
- - assert:
- that:
- - result is changed
- - result is success
- - "'{{ member }}' in result.diff.after.members"
-
- - name: Add non-existing storage into pool should fail
- proxmox_pool_member:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- poolid: "{{ poolid }}"
- member: "non-existing-storage"
- type: "{{ member_type }}"
- ignore_errors: true
- register: result
-
- - assert:
- that:
- - result is failed
- - "'Storage non-existing-storage doesn\\'t exist in the cluster' in result.msg"
-
- - name: Delete non-empty pool
- proxmox_pool:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- poolid: "{{ poolid }}"
- state: absent
- ignore_errors: true
- register: result
-
- - assert:
- that:
- - result is failed
- - "'Please remove members from pool first.' in result.msg"
-
- - name: Delete storage from the pool
- proxmox_pool_member:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- poolid: "{{ poolid }}"
- member: "{{ member }}"
- type: "{{ member_type }}"
- state: absent
- register: result
-
- - assert:
- that:
- - result is success
- - result is changed
-
- rescue:
- - name: Delete storage from the pool if it is added
- proxmox_pool_member:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- poolid: "{{ poolid }}"
- member: "{{ member }}"
- type: "{{ member_type }}"
- state: absent
- ignore_errors: true
-
- - name: Delete pool
- proxmox_pool:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- poolid: "{{ poolid }}"
- state: absent
- register: result
-
- - assert:
- that:
- - result is changed
- - result is success
- - result.poolid == "{{ poolid }}"
-
- rescue:
- - name: Delete test pool if it is created
- proxmox_pool:
- api_host: "{{ api_host }}"
- api_user: "{{ user }}@{{ domain }}"
- api_password: "{{ api_password | default(omit) }}"
- api_token_id: "{{ api_token_id | default(omit) }}"
- api_token_secret: "{{ api_token_secret | default(omit) }}"
- validate_certs: "{{ validate_certs }}"
- poolid: "{{ poolid }}"
- state: absent
- ignore_errors: true
diff --git a/tests/integration/targets/proxmox_template/tasks/main.yml b/tests/integration/targets/proxmox_template/tasks/main.yml
deleted file mode 100644
index 2d1187e890..0000000000
--- a/tests/integration/targets/proxmox_template/tasks/main.yml
+++ /dev/null
@@ -1,136 +0,0 @@
-####################################################################
-# WARNING: These are designed specifically for Ansible tests #
-# and should not be used as examples of how to write Ansible roles #
-####################################################################
-
-# Copyright (c) 2023, Sergei Antipov
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-- name: Proxmox VE virtual machines templates management
- tags: ['template']
- vars:
- filename: /tmp/dummy.iso
- block:
- - name: Create dummy ISO file
- ansible.builtin.command:
- cmd: 'truncate -s 300M {{ filename }}'
-
- - name: Delete requests_toolbelt module if it is installed
- ansible.builtin.pip:
- name: requests_toolbelt
- state: absent
-
- - name: Install latest proxmoxer
- ansible.builtin.pip:
- name: proxmoxer
- state: latest
-
- - name: Upload ISO as template to Proxmox VE cluster should fail
- proxmox_template:
- api_host: '{{ api_host }}'
- api_user: '{{ user }}@{{ domain }}'
- api_password: '{{ api_password | default(omit) }}'
- api_token_id: '{{ api_token_id | default(omit) }}'
- api_token_secret: '{{ api_token_secret | default(omit) }}'
- validate_certs: '{{ validate_certs }}'
- node: '{{ node }}'
- src: '{{ filename }}'
- content_type: iso
- force: true
- register: result
- ignore_errors: true
-
- - assert:
- that:
- - result is failed
- - result.msg is match('\'requests_toolbelt\' module is required to upload files larger than 256MB')
-
- - name: Install old (1.1.2) version of proxmoxer
- ansible.builtin.pip:
- name: proxmoxer==1.1.1
- state: present
-
- - name: Upload ISO as template to Proxmox VE cluster should be successful
- proxmox_template:
- api_host: '{{ api_host }}'
- api_user: '{{ user }}@{{ domain }}'
- api_password: '{{ api_password | default(omit) }}'
- api_token_id: '{{ api_token_id | default(omit) }}'
- api_token_secret: '{{ api_token_secret | default(omit) }}'
- validate_certs: '{{ validate_certs }}'
- node: '{{ node }}'
- src: '{{ filename }}'
- content_type: iso
- force: true
- register: result
-
- - assert:
- that:
- - result is changed
- - result is success
- - result.msg is match('template with volid=local:iso/dummy.iso uploaded')
-
- - name: Install latest proxmoxer
- ansible.builtin.pip:
- name: proxmoxer
- state: latest
-
- - name: Make smaller dummy file
- ansible.builtin.command:
- cmd: 'truncate -s 128M {{ filename }}'
-
- - name: Upload ISO as template to Proxmox VE cluster should be successful
- proxmox_template:
- api_host: '{{ api_host }}'
- api_user: '{{ user }}@{{ domain }}'
- api_password: '{{ api_password | default(omit) }}'
- api_token_id: '{{ api_token_id | default(omit) }}'
- api_token_secret: '{{ api_token_secret | default(omit) }}'
- validate_certs: '{{ validate_certs }}'
- node: '{{ node }}'
- src: '{{ filename }}'
- content_type: iso
- force: true
- register: result
-
- - assert:
- that:
- - result is changed
- - result is success
- - result.msg is match('template with volid=local:iso/dummy.iso uploaded')
-
- - name: Install requests_toolbelt
- ansible.builtin.pip:
- name: requests_toolbelt
- state: present
-
- - name: Make big dummy file
- ansible.builtin.command:
- cmd: 'truncate -s 300M {{ filename }}'
-
- - name: Upload ISO as template to Proxmox VE cluster should be successful
- proxmox_template:
- api_host: '{{ api_host }}'
- api_user: '{{ user }}@{{ domain }}'
- api_password: '{{ api_password | default(omit) }}'
- api_token_id: '{{ api_token_id | default(omit) }}'
- api_token_secret: '{{ api_token_secret | default(omit) }}'
- validate_certs: '{{ validate_certs }}'
- node: '{{ node }}'
- src: '{{ filename }}'
- content_type: iso
- force: true
- register: result
-
- - assert:
- that:
- - result is changed
- - result is success
- - result.msg is match('template with volid=local:iso/dummy.iso uploaded')
-
- always:
- - name: Delete ISO file from host
- ansible.builtin.file:
- path: '{{ filename }}'
- state: absent
diff --git a/tests/integration/targets/read_csv/tasks/main.yml b/tests/integration/targets/read_csv/tasks/main.yml
index c09349dd5b..f8b46a3e62 100644
--- a/tests/integration/targets/read_csv/tasks/main.yml
+++ b/tests/integration/targets/read_csv/tasks/main.yml
@@ -26,14 +26,14 @@
- assert:
that:
- - users_unique.dict.dag.name == 'dag'
- - users_unique.dict.dag.gecos == 'Dag Wieërs'
- - users_unique.dict.dag.uid == '500'
- - users_unique.dict.dag.gid == '500'
- - users_unique.dict.jeroen.name == 'jeroen'
- - users_unique.dict.jeroen.gecos == 'Jeroen Hoekx'
- - users_unique.dict.jeroen.uid == '501'
- - users_unique.dict.jeroen.gid == '500'
+ - users_unique.dict.dag.name == 'dag'
+ - users_unique.dict.dag.gecos == 'Dag Wieërs'
+ - users_unique.dict.dag.uid == '500'
+ - users_unique.dict.dag.gid == '500'
+ - users_unique.dict.jeroen.name == 'jeroen'
+ - users_unique.dict.jeroen.gecos == 'Jeroen Hoekx'
+ - users_unique.dict.jeroen.uid == '501'
+ - users_unique.dict.jeroen.gid == '500'
# Read a CSV file and access the first item
- name: Read users from CSV file and return a list
@@ -43,14 +43,14 @@
- assert:
that:
- - users_unique.list.0.name == 'dag'
- - users_unique.list.0.gecos == 'Dag Wieërs'
- - users_unique.list.0.uid == '500'
- - users_unique.list.0.gid == '500'
- - users_unique.list.1.name == 'jeroen'
- - users_unique.list.1.gecos == 'Jeroen Hoekx'
- - users_unique.list.1.uid == '501'
- - users_unique.list.1.gid == '500'
+ - users_unique.list.0.name == 'dag'
+ - users_unique.list.0.gecos == 'Dag Wieërs'
+ - users_unique.list.0.uid == '500'
+ - users_unique.list.0.gid == '500'
+ - users_unique.list.1.name == 'jeroen'
+ - users_unique.list.1.gecos == 'Jeroen Hoekx'
+ - users_unique.list.1.uid == '501'
+ - users_unique.list.1.gid == '500'
# Create basic CSV file using semi-colon
@@ -74,14 +74,14 @@
- assert:
that:
- - users_nonunique.dict.dag.name == 'dag'
- - users_nonunique.dict.dag.gecos == 'Dag Wieers'
- - users_nonunique.dict.dag.uid == '502'
- - users_nonunique.dict.dag.gid == '500'
- - users_nonunique.dict.jeroen.name == 'jeroen'
- - users_nonunique.dict.jeroen.gecos == 'Jeroen Hoekx'
- - users_nonunique.dict.jeroen.uid == '501'
- - users_nonunique.dict.jeroen.gid == '500'
+ - users_nonunique.dict.dag.name == 'dag'
+ - users_nonunique.dict.dag.gecos == 'Dag Wieers'
+ - users_nonunique.dict.dag.uid == '502'
+ - users_nonunique.dict.dag.gid == '500'
+ - users_nonunique.dict.jeroen.name == 'jeroen'
+ - users_nonunique.dict.jeroen.gecos == 'Jeroen Hoekx'
+ - users_nonunique.dict.jeroen.uid == '501'
+ - users_nonunique.dict.jeroen.gid == '500'
# Read a CSV file using an non-existing dialect
@@ -94,8 +94,8 @@
- assert:
that:
- - users_placebo is failed
- - users_placebo.msg == "Dialect 'placebo' is not supported by your version of python."
+ - users_placebo is failed
+ - users_placebo.msg == "Dialect 'placebo' is not supported by your version of python."
# Create basic CSV file without header
@@ -116,14 +116,14 @@
- assert:
that:
- - users_noheader.dict.dag.name == 'dag'
- - users_noheader.dict.dag.gecos == 'Dag Wieërs'
- - users_noheader.dict.dag.uid == '500'
- - users_noheader.dict.dag.gid == '500'
- - users_noheader.dict.jeroen.name == 'jeroen'
- - users_noheader.dict.jeroen.gecos == 'Jeroen Hoekx'
- - users_noheader.dict.jeroen.uid == '501'
- - users_noheader.dict.jeroen.gid == '500'
+ - users_noheader.dict.dag.name == 'dag'
+ - users_noheader.dict.dag.gecos == 'Dag Wieërs'
+ - users_noheader.dict.dag.uid == '500'
+ - users_noheader.dict.dag.gid == '500'
+ - users_noheader.dict.jeroen.name == 'jeroen'
+ - users_noheader.dict.jeroen.gecos == 'Jeroen Hoekx'
+ - users_noheader.dict.jeroen.uid == '501'
+ - users_noheader.dict.jeroen.gid == '500'
# Create broken file
@@ -146,8 +146,8 @@
- assert:
that:
- - users_broken is failed
- - "'Unable to process file' in users_broken.msg"
+ - users_broken is failed
+ - "'Unable to process file' in users_broken.msg"
# Create basic CSV file with BOM
- name: Create unique CSV file with BOM
@@ -166,11 +166,11 @@
- assert:
that:
- - users_bom.list.0.name == 'dag'
- - users_bom.list.0.gecos == 'Dag Wieërs'
- - users_bom.list.0.uid == '500'
- - users_bom.list.0.gid == '500'
- - users_bom.list.1.name == 'jeroen'
- - users_bom.list.1.gecos == 'Jeroen Hoekx'
- - users_bom.list.1.uid == '501'
- - users_bom.list.1.gid == '500'
+ - users_bom.list.0.name == 'dag'
+ - users_bom.list.0.gecos == 'Dag Wieërs'
+ - users_bom.list.0.uid == '500'
+ - users_bom.list.0.gid == '500'
+ - users_bom.list.1.name == 'jeroen'
+ - users_bom.list.1.gecos == 'Jeroen Hoekx'
+ - users_bom.list.1.uid == '501'
+ - users_bom.list.1.gid == '500'
diff --git a/tests/integration/targets/redis_info/meta/main.yml b/tests/integration/targets/redis_info/meta/main.yml
index cd516fd239..404b9ba31c 100644
--- a/tests/integration/targets/redis_info/meta/main.yml
+++ b/tests/integration/targets/redis_info/meta/main.yml
@@ -4,4 +4,4 @@
# SPDX-License-Identifier: GPL-3.0-or-later
dependencies:
-- setup_redis_replication
+ - setup_redis_replication
diff --git a/tests/integration/targets/redis_info/tasks/main.yml b/tests/integration/targets/redis_info/tasks/main.yml
index 4a11de3650..52263ecc4f 100644
--- a/tests/integration/targets/redis_info/tasks/main.yml
+++ b/tests/integration/targets/redis_info/tasks/main.yml
@@ -14,10 +14,10 @@
- assert:
that:
- - result is not changed
- - result.info is defined
- - result.info.tcp_port == master_port
- - result.info.role == 'master'
+ - result is not changed
+ - result.info is defined
+ - result.info.tcp_port == master_port
+ - result.info.role == 'master'
- name: redis_info - connect to master (check)
community.general.redis_info:
@@ -29,10 +29,10 @@
- assert:
that:
- - result is not changed
- - result.info is defined
- - result.info.tcp_port == master_port
- - result.info.role == 'master'
+ - result is not changed
+ - result.info is defined
+ - result.info.tcp_port == master_port
+ - result.info.role == 'master'
- name: redis_info - connect to replica
community.general.redis_info:
@@ -42,7 +42,7 @@
- assert:
that:
- - result is not changed
- - result.info is defined
- - result.info.tcp_port == replica_port
- - result.info.role == 'slave'
+ - result is not changed
+ - result.info is defined
+ - result.info.tcp_port == replica_port
+ - result.info.role == 'slave'
diff --git a/tests/integration/targets/rundeck/defaults/main.yml b/tests/integration/targets/rundeck/defaults/main.yml
index 4d7ea31468..503f627857 100644
--- a/tests/integration/targets/rundeck/defaults/main.yml
+++ b/tests/integration/targets/rundeck/defaults/main.yml
@@ -6,3 +6,32 @@
rundeck_url: http://localhost:4440
rundeck_api_version: 39
rundeck_job_id: 3b8a6e54-69fb-42b7-b98f-f82e59238478
+
+system_acl_policy: |
+ description: Test ACL
+ context:
+ application: 'rundeck'
+ for:
+ project:
+ - allow:
+ - read
+ by:
+ group:
+ - users
+
+project_acl_policy: |
+ description: Test project acl
+ for:
+ resource:
+ - equals:
+ kind: node
+ allow: [read,refresh]
+ - equals:
+ kind: event
+ allow: [read]
+ job:
+ - allow: [run,kill]
+ node:
+ - allow: [read,run]
+ by:
+ group: users
diff --git a/tests/integration/targets/rundeck/files/test_job.yaml b/tests/integration/targets/rundeck/files/test_job.yaml
index baa852ecce..073e11fd19 100644
--- a/tests/integration/targets/rundeck/files/test_job.yaml
+++ b/tests/integration/targets/rundeck/files/test_job.yaml
@@ -11,18 +11,18 @@
name: test_job
nodeFilterEditable: false
options:
- - label: Exit Code
- name: exit_code
- value: '0'
- - label: Sleep
- name: sleep
- value: '1'
+ - label: Exit Code
+ name: exit_code
+ value: '0'
+ - label: Sleep
+ name: sleep
+ value: '1'
plugins:
ExecutionLifecycle: null
scheduleEnabled: true
sequence:
commands:
- - exec: sleep $RD_OPTION_SLEEP && echo "Test done!" && exit $RD_OPTION_EXIT_CODE
+ - exec: sleep $RD_OPTION_SLEEP && echo "Test done!" && exit $RD_OPTION_EXIT_CODE
keepgoing: false
strategy: node-first
uuid: 3b8a6e54-69fb-42b7-b98f-f82e59238478
diff --git a/tests/integration/targets/rundeck/meta/main.yml b/tests/integration/targets/rundeck/meta/main.yml
index c125e4046a..31dd1cfb9d 100644
--- a/tests/integration/targets/rundeck/meta/main.yml
+++ b/tests/integration/targets/rundeck/meta/main.yml
@@ -4,4 +4,4 @@
# SPDX-License-Identifier: GPL-3.0-or-later
dependencies:
-- setup_rundeck
+ - setup_rundeck
diff --git a/tests/integration/targets/rundeck/tasks/main.yml b/tests/integration/targets/rundeck/tasks/main.yml
index e42780b9b7..7762832d10 100644
--- a/tests/integration/targets/rundeck/tasks/main.yml
+++ b/tests/integration/targets/rundeck/tasks/main.yml
@@ -15,6 +15,9 @@
RD_USER: admin
RD_PASSWORD: admin
register: rundeck_api_token
+ retries: 3
+ until: rundeck_api_token.rc == 0
+ changed_when: true
- name: Create a Rundeck project
community.general.rundeck_project:
@@ -24,6 +27,71 @@
token: "{{ rundeck_api_token.stdout_lines[-1] }}"
state: present
+- name: Create a system ACL
+ community.general.rundeck_acl_policy:
+ name: test_acl
+ api_version: "{{ rundeck_api_version }}"
+ url: "{{ rundeck_url }}"
+ token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ state: present
+ policy: "{{ system_acl_policy }}"
+
+- name: Create a project ACL
+ community.general.rundeck_acl_policy:
+ name: test_acl
+ api_version: "{{ rundeck_api_version }}"
+ url: "{{ rundeck_url }}"
+ token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ state: present
+ policy: "{{ project_acl_policy }}"
+ project: test_project
+
+- name: Retrieve ACLs
+ ansible.builtin.uri:
+ url: "{{ rundeck_url }}/api/{{ rundeck_api_version }}/{{ item }}"
+ headers:
+ accept: application/json
+ x-rundeck-auth-token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ register: acl_policy_check
+ loop:
+ - system/acl/test_acl.aclpolicy
+ - project/test_project/acl/test_acl.aclpolicy
+
+- name: Assert ACL content is correct
+ ansible.builtin.assert:
+ that:
+ - acl_policy_check['results'][0]['json']['contents'] == system_acl_policy
+ - acl_policy_check['results'][1]['json']['contents'] == project_acl_policy
+
+- name: Remove system ACL
+ community.general.rundeck_acl_policy:
+ name: test_acl
+ api_version: "{{ rundeck_api_version }}"
+ url: "{{ rundeck_url }}"
+ token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ state: absent
+
+- name: Remove project ACL
+ community.general.rundeck_acl_policy:
+ name: test_acl
+ api_version: "{{ rundeck_api_version }}"
+ url: "{{ rundeck_url }}"
+ token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ state: absent
+ project: test_project
+
+- name: Check that ACLs have been removed
+ ansible.builtin.uri:
+ url: "{{ rundeck_url }}/api/{{ rundeck_api_version }}/{{ item }}"
+ headers:
+ accept: application/json
+ x-rundeck-auth-token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ status_code:
+ - 404
+ loop:
+ - system/acl/test_acl.aclpolicy
+ - project/test_project/acl/test_acl.aclpolicy
+
- name: Copy test_job definition to /tmp
copy:
src: test_job.yaml
diff --git a/tests/integration/targets/scaleway_compute/tasks/security_group.yml b/tests/integration/targets/scaleway_compute/tasks/security_group.yml
index 59f81e6af1..971fae6076 100644
--- a/tests/integration/targets/scaleway_compute/tasks/security_group.yml
+++ b/tests/integration/targets/scaleway_compute/tasks/security_group.yml
@@ -19,105 +19,105 @@
- debug: var=security_group
- block:
- - name: Create a server with security_group (Check)
- check_mode: true
- scaleway_compute:
- name: '{{ scaleway_name }}'
- state: present
- image: '{{ scaleway_image_id }}'
- organization: '{{ scaleway_organization }}'
- region: '{{ scaleway_region }}'
- commercial_type: '{{ scaleway_commerial_type }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
+ - name: Create a server with security_group (Check)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
- register: server_creation_check_task
+ register: server_creation_check_task
- - debug: var=server_creation_check_task
+ - debug: var=server_creation_check_task
- - assert:
- that:
- - server_creation_check_task is success
- - server_creation_check_task is changed
+ - assert:
+ that:
+ - server_creation_check_task is success
+ - server_creation_check_task is changed
- - name: Create a server
- scaleway_compute:
- name: '{{ scaleway_name }}'
- state: present
- image: '{{ scaleway_image_id }}'
- organization: '{{ scaleway_organization }}'
- region: '{{ scaleway_region }}'
- commercial_type: '{{ scaleway_commerial_type }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
- wait: true
+ - name: Create a server
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ wait: true
- register: server_creation_task
+ register: server_creation_task
- - debug: var=server_creation_task
+ - debug: var=server_creation_task
- - assert:
- that:
- - server_creation_task is success
- - server_creation_task is changed
+ - assert:
+ that:
+ - server_creation_task is success
+ - server_creation_task is changed
- - name: Create a server with security_group (Confirmation)
- scaleway_compute:
- name: '{{ scaleway_name }}'
- state: present
- image: '{{ scaleway_image_id }}'
- organization: '{{ scaleway_organization }}'
- region: '{{ scaleway_region }}'
- commercial_type: '{{ scaleway_commerial_type }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
- wait: true
+ - name: Create a server with security_group (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ wait: true
- register: server_creation_confirmation_task
+ register: server_creation_confirmation_task
- - debug: var=server_creation_confirmation_task
+ - debug: var=server_creation_confirmation_task
- - assert:
- that:
- - server_creation_confirmation_task is success
- - server_creation_confirmation_task is not changed
+ - assert:
+ that:
+ - server_creation_confirmation_task is success
+ - server_creation_confirmation_task is not changed
- - name: Keep current security_group (Check)
- check_mode: true
- scaleway_compute:
- name: '{{ scaleway_name }}'
- state: present
- image: '{{ scaleway_image_id }}'
- organization: '{{ scaleway_organization }}'
- region: '{{ scaleway_region }}'
- commercial_type: '{{ scaleway_commerial_type }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
- wait: true
+ - name: Keep current security_group (Check)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ wait: true
- register: server_creation_confirmation_task
+ register: server_creation_confirmation_task
- - debug: var=server_creation_confirmation_task
+ - debug: var=server_creation_confirmation_task
- - assert:
- that:
- - server_creation_confirmation_task is success
- - server_creation_confirmation_task is not changed
+ - assert:
+ that:
+ - server_creation_confirmation_task is success
+ - server_creation_confirmation_task is not changed
- - name: Keep current security_group
- scaleway_compute:
- name: '{{ scaleway_name }}'
- state: present
- image: '{{ scaleway_image_id }}'
- organization: '{{ scaleway_organization }}'
- region: '{{ scaleway_region }}'
- commercial_type: '{{ scaleway_commerial_type }}'
- wait: true
+ - name: Keep current security_group
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
- register: server_creation_confirmation_task
+ register: server_creation_confirmation_task
- - debug: var=server_creation_confirmation_task
+ - debug: var=server_creation_confirmation_task
- - assert:
- that:
- - server_creation_confirmation_task is success
- - server_creation_confirmation_task is not changed
+ - assert:
+ that:
+ - server_creation_confirmation_task is success
+ - server_creation_confirmation_task is not changed
always:
- name: Destroy it
diff --git a/tests/integration/targets/scaleway_container_namespace/defaults/main.yml b/tests/integration/targets/scaleway_container_namespace/defaults/main.yml
index 876f8b7a63..de5cb3005a 100644
--- a/tests/integration/targets/scaleway_container_namespace/defaults/main.yml
+++ b/tests/integration/targets/scaleway_container_namespace/defaults/main.yml
@@ -10,6 +10,6 @@ updated_description: Container namespace used for testing scaleway_container_nam
environment_variables:
MY_VAR: my_value
secret_environment_variables:
- MY_SECRET_VAR: my_secret_value
+ MY_SECRET_VAR: my_secret_value
updated_secret_environment_variables:
MY_SECRET_VAR: my_other_secret_value
\ No newline at end of file
diff --git a/tests/integration/targets/scaleway_security_group/tasks/main.yml b/tests/integration/targets/scaleway_security_group/tasks/main.yml
index cab972ae50..40140e0b1f 100644
--- a/tests/integration/targets/scaleway_security_group/tasks/main.yml
+++ b/tests/integration/targets/scaleway_security_group/tasks/main.yml
@@ -31,69 +31,69 @@
- security_group_creation is changed
- block:
- - name: Create security group
- scaleway_security_group:
- state: present
- region: '{{ scaleway_region }}'
- name: security_group
- description: 'my security group description'
- organization: '{{ scaleway_organization }}'
- stateful: false
- inbound_default_policy: accept
- outbound_default_policy: accept
- organization_default: false
- register: security_group_creation
+ - name: Create security group
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation
- - debug: var=security_group_creation
+ - debug: var=security_group_creation
- - name: Ensure security groups facts is success
- assert:
- that:
- - security_group_creation is success
- - security_group_creation is changed
+ - name: Ensure security groups facts is success
+ assert:
+ that:
+ - security_group_creation is success
+ - security_group_creation is changed
- - name: Create security group duplicate
- scaleway_security_group:
- state: present
- region: '{{ scaleway_region }}'
- name: security_group
- description: 'my security group description'
- organization: '{{ scaleway_organization }}'
- stateful: false
- inbound_default_policy: accept
- outbound_default_policy: accept
- organization_default: false
- register: security_group_creation
+ - name: Create security group duplicate
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation
- - debug: var=security_group_creation
+ - debug: var=security_group_creation
- - name: Ensure security groups duplicate facts is success
- assert:
- that:
- - security_group_creation is success
- - security_group_creation is not changed
+ - name: Ensure security groups duplicate facts is success
+ assert:
+ that:
+ - security_group_creation is success
+ - security_group_creation is not changed
- - name: Delete security group check
- check_mode: true
- scaleway_security_group:
- state: absent
- region: '{{ scaleway_region }}'
- name: security_group
- description: 'my security group description'
- organization: '{{ scaleway_organization }}'
- stateful: false
- inbound_default_policy: accept
- outbound_default_policy: accept
- organization_default: false
- register: security_group_deletion
+ - name: Delete security group check
+ check_mode: true
+ scaleway_security_group:
+ state: absent
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_deletion
- - debug: var=security_group_deletion
+ - debug: var=security_group_deletion
- - name: Ensure security groups delete check facts is success
- assert:
- that:
- - security_group_deletion is success
- - security_group_deletion is changed
+ - name: Ensure security groups delete check facts is success
+ assert:
+ that:
+ - security_group_deletion is success
+ - security_group_deletion is changed
always:
- name: Delete security group
diff --git a/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml b/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml
index 3839421955..a438e4be57 100644
--- a/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml
+++ b/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml
@@ -44,83 +44,83 @@
- security_group_rule_creation_task is changed
- block:
- - name: Create security_group_rule check
- scaleway_security_group_rule:
- state: present
- region: '{{ scaleway_region }}'
- protocol: '{{ protocol }}'
- port: '{{ port }}'
- ip_range: '{{ ip_range }}'
- direction: '{{ direction }}'
- action: '{{ action }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
- register: security_group_rule_creation_task
+ - name: Create security_group_rule check
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
- - debug: var=security_group_rule_creation_task
+ - debug: var=security_group_rule_creation_task
- - assert:
- that:
- - security_group_rule_creation_task is success
- - security_group_rule_creation_task is changed
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is changed
- - name: Create security_group_rule duplicate
- scaleway_security_group_rule:
- state: present
- region: '{{ scaleway_region }}'
- protocol: '{{ protocol }}'
- port: '{{ port }}'
- ip_range: '{{ ip_range }}'
- direction: '{{ direction }}'
- action: '{{ action }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
- register: security_group_rule_creation_task
+ - name: Create security_group_rule duplicate
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
- - debug: var=security_group_rule_creation_task
+ - debug: var=security_group_rule_creation_task
- - assert:
- that:
- - security_group_rule_creation_task is success
- - security_group_rule_creation_task is not changed
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is not changed
- - name: Delete security_group_rule check
- check_mode: true
- scaleway_security_group_rule:
- state: absent
- region: '{{ scaleway_region }}'
- protocol: '{{ protocol }}'
- port: '{{ port }}'
- ip_range: '{{ ip_range }}'
- direction: '{{ direction }}'
- action: '{{ action }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
- register: security_group_rule_deletion_task
+ - name: Delete security_group_rule check
+ check_mode: true
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
- - debug: var=security_group_rule_deletion_task
+ - debug: var=security_group_rule_deletion_task
- - assert:
- that:
- - security_group_rule_deletion_task is success
- - security_group_rule_deletion_task is changed
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
always:
- - name: Delete security_group_rule check
- scaleway_security_group_rule:
- state: absent
- region: '{{ scaleway_region }}'
- protocol: '{{ protocol }}'
- port: '{{ port }}'
- ip_range: '{{ ip_range }}'
- direction: '{{ direction }}'
- action: '{{ action }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
- register: security_group_rule_deletion_task
+ - name: Delete security_group_rule check
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
- - debug: var=security_group_rule_deletion_task
+ - debug: var=security_group_rule_deletion_task
- - assert:
- that:
- - security_group_rule_deletion_task is success
- - security_group_rule_deletion_task is changed
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
- name: Delete security_group_rule check
scaleway_security_group_rule:
@@ -142,83 +142,83 @@
- security_group_rule_deletion_task is not changed
- block:
- - name: Create security_group_rule with null check
- scaleway_security_group_rule:
- state: present
- region: '{{ scaleway_region }}'
- protocol: '{{ protocol }}'
- port: null
- ip_range: '{{ ip_range }}'
- direction: '{{ direction }}'
- action: '{{ action }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
- register: security_group_rule_creation_task
+ - name: Create security_group_rule with null check
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
- - debug: var=security_group_rule_creation_task
+ - debug: var=security_group_rule_creation_task
- - assert:
- that:
- - security_group_rule_creation_task is success
- - security_group_rule_creation_task is changed
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is changed
- - name: Create security_group_rule with null duplicate
- scaleway_security_group_rule:
- state: present
- region: '{{ scaleway_region }}'
- protocol: '{{ protocol }}'
- port: null
- ip_range: '{{ ip_range }}'
- direction: '{{ direction }}'
- action: '{{ action }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
- register: security_group_rule_creation_task
+ - name: Create security_group_rule with null duplicate
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
- - debug: var=security_group_rule_creation_task
+ - debug: var=security_group_rule_creation_task
- - assert:
- that:
- - security_group_rule_creation_task is success
- - security_group_rule_creation_task is not changed
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is not changed
- - name: Delete security_group_rule with null check
- check_mode: true
- scaleway_security_group_rule:
- state: absent
- region: '{{ scaleway_region }}'
- protocol: '{{ protocol }}'
- port: null
- ip_range: '{{ ip_range }}'
- direction: '{{ direction }}'
- action: '{{ action }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
- register: security_group_rule_deletion_task
+ - name: Delete security_group_rule with null check
+ check_mode: true
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
- - debug: var=security_group_rule_deletion_task
+ - debug: var=security_group_rule_deletion_task
- - assert:
- that:
- - security_group_rule_deletion_task is success
- - security_group_rule_deletion_task is changed
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
always:
- - name: Delete security_group_rule with null check
- scaleway_security_group_rule:
- state: absent
- region: '{{ scaleway_region }}'
- protocol: '{{ protocol }}'
- port: null
- ip_range: '{{ ip_range }}'
- direction: '{{ direction }}'
- action: '{{ action }}'
- security_group: '{{ security_group.scaleway_security_group.id }}'
- register: security_group_rule_deletion_task
+ - name: Delete security_group_rule with null check
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
- - debug: var=security_group_rule_deletion_task
+ - debug: var=security_group_rule_deletion_task
- - assert:
- that:
- - security_group_rule_deletion_task is success
- - security_group_rule_deletion_task is changed
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
- name: Delete security_group_rule with null check
scaleway_security_group_rule:
diff --git a/tests/integration/targets/sefcontext/tasks/sefcontext.yml b/tests/integration/targets/sefcontext/tasks/sefcontext.yml
index 258f1ace91..ae815a22f8 100644
--- a/tests/integration/targets/sefcontext/tasks/sefcontext.yml
+++ b/tests/integration/targets/sefcontext/tasks/sefcontext.yml
@@ -38,8 +38,8 @@
- assert:
that:
- - first is changed
- - first.setype == 'httpd_sys_content_t'
+ - first is changed
+ - first.setype == 'httpd_sys_content_t'
- name: Set SELinux file context of foo/bar (again)
sefcontext:
@@ -51,8 +51,8 @@
- assert:
that:
- - second is not changed
- - second.setype == 'httpd_sys_content_t'
+ - second is not changed
+ - second.setype == 'httpd_sys_content_t'
- name: Change SELinux file context of foo/bar
sefcontext:
@@ -64,8 +64,8 @@
- assert:
that:
- - third is changed
- - third.setype == 'unlabeled_t'
+ - third is changed
+ - third.setype == 'unlabeled_t'
- name: Change SELinux file context of foo/bar (again)
sefcontext:
@@ -77,8 +77,8 @@
- assert:
that:
- - fourth is not changed
- - fourth.setype == 'unlabeled_t'
+ - fourth is not changed
+ - fourth.setype == 'unlabeled_t'
- name: Delete SELinux file context of foo/bar
sefcontext:
@@ -90,8 +90,8 @@
- assert:
that:
- - fifth is changed
- - fifth.setype == 'httpd_sys_content_t'
+ - fifth is changed
+ - fifth.setype == 'httpd_sys_content_t'
- name: Delete SELinux file context of foo/bar (again)
sefcontext:
@@ -103,8 +103,8 @@
- assert:
that:
- - sixth is not changed
- - sixth.setype == 'unlabeled_t'
+ - sixth is not changed
+ - sixth.setype == 'unlabeled_t'
- name: Set SELinux file context path substitution of foo
sefcontext:
@@ -116,8 +116,8 @@
- assert:
that:
- - subst_first is changed
- - subst_first.substitute == '/home'
+ - subst_first is changed
+ - subst_first.substitute == '/home'
- name: Set SELinux file context path substitution of foo (again)
sefcontext:
@@ -129,8 +129,8 @@
- assert:
that:
- - subst_second is not changed
- - subst_second.substitute == '/home'
+ - subst_second is not changed
+ - subst_second.substitute == '/home'
- name: Change SELinux file context path substitution of foo
sefcontext:
@@ -142,8 +142,8 @@
- assert:
that:
- - subst_third is changed
- - subst_third.substitute == '/boot'
+ - subst_third is changed
+ - subst_third.substitute == '/boot'
- name: Change SELinux file context path substitution of foo (again)
sefcontext:
@@ -155,8 +155,8 @@
- assert:
that:
- - subst_fourth is not changed
- - subst_fourth.substitute == '/boot'
+ - subst_fourth is not changed
+ - subst_fourth.substitute == '/boot'
- name: Try to delete non-existing SELinux file context path substitution of foo
sefcontext:
@@ -168,8 +168,8 @@
- assert:
that:
- - subst_fifth is not changed
- - subst_fifth.substitute == '/dev'
+ - subst_fifth is not changed
+ - subst_fifth.substitute == '/dev'
- name: Delete SELinux file context path substitution of foo
sefcontext:
@@ -181,8 +181,8 @@
- assert:
that:
- - subst_sixth is changed
- - subst_sixth.substitute == '/boot'
+ - subst_sixth is changed
+ - subst_sixth.substitute == '/boot'
- name: Delete SELinux file context path substitution of foo (again)
sefcontext:
@@ -194,8 +194,8 @@
- assert:
that:
- - subst_seventh is not changed
- - subst_seventh.substitute == '/boot'
+ - subst_seventh is not changed
+ - subst_seventh.substitute == '/boot'
- name: Set SELinux file context path substitution of foo
sefcontext:
@@ -207,8 +207,8 @@
- assert:
that:
- - subst_eighth is changed
- - subst_eighth.substitute == '/home'
+ - subst_eighth is changed
+ - subst_eighth.substitute == '/home'
- name: Delete SELinux file context path substitution of foo
sefcontext:
@@ -219,7 +219,7 @@
- assert:
that:
- - subst_ninth is changed
+ - subst_ninth is changed
- name: Delete SELinux file context path substitution of foo (again)
sefcontext:
@@ -230,4 +230,4 @@
- assert:
that:
- - subst_tenth is not changed
+ - subst_tenth is not changed
diff --git a/tests/integration/targets/setup_cron/tasks/main.yml b/tests/integration/targets/setup_cron/tasks/main.yml
index b669f0219f..92d2893403 100644
--- a/tests/integration/targets/setup_cron/tasks/main.yml
+++ b/tests/integration/targets/setup_cron/tasks/main.yml
@@ -11,65 +11,65 @@
- when:
- not (ansible_os_family == 'Alpine' and ansible_distribution_version is version('3.15', '<')) # TODO
block:
- - name: Include distribution specific variables
- include_vars: '{{ lookup(''first_found'', search) }}'
- vars:
- search:
- files:
- - '{{ ansible_distribution | lower }}.yml'
- - '{{ ansible_os_family | lower }}.yml'
- - '{{ ansible_system | lower }}.yml'
- - default.yml
- paths:
- - vars
- - name: install cron package
- package:
- name: '{{ cron_pkg }}'
- when: (cron_pkg | default(false, true)) is truthy
- register: cron_package_installed
- until: cron_package_installed is success
- - when: (faketime_pkg | default(false, true)) is truthy
- block:
- - name: install cron and faketime packages
+ - name: Include distribution specific variables
+ include_vars: '{{ lookup(''first_found'', search) }}'
+ vars:
+ search:
+ files:
+ - '{{ ansible_distribution | lower }}.yml'
+ - '{{ ansible_os_family | lower }}.yml'
+ - '{{ ansible_system | lower }}.yml'
+ - default.yml
+ paths:
+ - vars
+ - name: install cron package
package:
- name: '{{ faketime_pkg }}'
- register: faketime_package_installed
- until: faketime_package_installed is success
- - name: Find libfaketime path
- shell: '{{ list_pkg_files }} {{ faketime_pkg }} | grep -F libfaketime.so.1'
- register: libfaketime_path
- - when: ansible_service_mgr == 'systemd'
+ name: '{{ cron_pkg }}'
+ when: (cron_pkg | default(false, true)) is truthy
+ register: cron_package_installed
+ until: cron_package_installed is success
+ - when: (faketime_pkg | default(false, true)) is truthy
block:
- - name: create directory for cron drop-in file
- file:
- path: /etc/systemd/system/{{ cron_service }}.service.d
- state: directory
- owner: root
- group: root
- mode: '0755'
- - name: Use faketime with cron service
- copy:
- content: '[Service]
+ - name: install cron and faketime packages
+ package:
+ name: '{{ faketime_pkg }}'
+ register: faketime_package_installed
+ until: faketime_package_installed is success
+ - name: Find libfaketime path
+ shell: '{{ list_pkg_files }} {{ faketime_pkg }} | grep -F libfaketime.so.1'
+ register: libfaketime_path
+ - when: ansible_service_mgr == 'systemd'
+ block:
+ - name: create directory for cron drop-in file
+ file:
+ path: /etc/systemd/system/{{ cron_service }}.service.d
+ state: directory
+ owner: root
+ group: root
+ mode: '0755'
+ - name: Use faketime with cron service
+ copy:
+ content: '[Service]
- Environment=LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }}
+ Environment=LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }}
- Environment="FAKETIME=+0y x10"
+ Environment="FAKETIME=+0y x10"
- Environment=RANDOM_DELAY=0'
- dest: /etc/systemd/system/{{ cron_service }}.service.d/faketime.conf
- owner: root
- group: root
- mode: '0644'
- - when: ansible_system == 'FreeBSD'
- name: Use faketime with cron service
- copy:
- content: cron_env='LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }} FAKETIME="+0y x10"'
- dest: /etc/rc.conf.d/cron
- owner: root
- group: wheel
- mode: '0644'
- - name: enable cron service
- service:
- daemon-reload: '{{ (ansible_service_mgr == ''systemd'') | ternary(true, omit) }}'
- name: '{{ cron_service }}'
- state: restarted
+ Environment=RANDOM_DELAY=0'
+ dest: /etc/systemd/system/{{ cron_service }}.service.d/faketime.conf
+ owner: root
+ group: root
+ mode: '0644'
+ - when: ansible_system == 'FreeBSD'
+ name: Use faketime with cron service
+ copy:
+ content: cron_env='LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }} FAKETIME="+0y x10"'
+ dest: /etc/rc.conf.d/cron
+ owner: root
+ group: wheel
+ mode: '0644'
+ - name: enable cron service
+ service:
+ daemon-reload: '{{ (ansible_service_mgr == ''systemd'') | ternary(true, omit) }}'
+ name: '{{ cron_service }}'
+ state: restarted
diff --git a/tests/integration/targets/setup_docker/tasks/main.yml b/tests/integration/targets/setup_docker/tasks/main.yml
index 19bc7aa8c3..0c1b86951a 100644
--- a/tests/integration/targets/setup_docker/tasks/main.yml
+++ b/tests/integration/targets/setup_docker/tasks/main.yml
@@ -47,7 +47,7 @@
become: true
ansible.builtin.file:
path: /var/run/docker.sock
- mode: 0666
+ mode: "0666"
- name: Install python "requests"
ansible.builtin.pip:
diff --git a/tests/integration/targets/setup_etcd3/defaults/main.yml b/tests/integration/targets/setup_etcd3/defaults/main.yml
index f185ef0c25..3ca15a6220 100644
--- a/tests/integration/targets/setup_etcd3/defaults/main.yml
+++ b/tests/integration/targets/setup_etcd3/defaults/main.yml
@@ -9,7 +9,7 @@
#
etcd3_ver: "v3.2.14"
etcd3_download_server: "https://storage.googleapis.com/etcd"
-#etcd3_download_server: "https://github.com/coreos/etcd/releases/download"
+# etcd3_download_server: "https://github.com/coreos/etcd/releases/download"
etcd3_download_url: "{{ etcd3_download_server }}/{{ etcd3_ver }}/etcd-{{ etcd3_ver }}-linux-amd64.tar.gz"
etcd3_download_location: /tmp/etcd-download-test
etcd3_path: "{{ etcd3_download_location }}/etcd-{{ etcd3_ver }}-linux-amd64"
diff --git a/tests/integration/targets/setup_etcd3/tasks/main.yml b/tests/integration/targets/setup_etcd3/tasks/main.yml
index 1da52e225f..72252cfe51 100644
--- a/tests/integration/targets/setup_etcd3/tasks/main.yml
+++ b/tests/integration/targets/setup_etcd3/tasks/main.yml
@@ -15,90 +15,90 @@
# setup etcd3 for supported distros
- block:
- - name: python 2
- set_fact:
- python_suffix: ""
- when: ansible_python_version is version('3', '<')
+ - name: python 2
+ set_fact:
+ python_suffix: ""
+ when: ansible_python_version is version('3', '<')
- - name: python 3
- set_fact:
- python_suffix: "-py3"
- when: ansible_python_version is version('3', '>=')
+ - name: python 3
+ set_fact:
+ python_suffix: "-py3"
+ when: ansible_python_version is version('3', '>=')
- - include_vars: '{{ item }}'
- with_first_found:
- - files:
- - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
- - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml'
- - '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
- - '{{ ansible_os_family }}{{ python_suffix }}.yml'
- - 'default{{ python_suffix }}.yml'
- - 'default.yml'
- paths: '../vars'
+ - include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}{{ python_suffix }}.yml'
+ - 'default{{ python_suffix }}.yml'
+ - 'default.yml'
+ paths: '../vars'
- - name: Upgrade setuptools python2 module
- pip:
- name: setuptools<45
- extra_args: --upgrade
- state: present
- when: python_suffix == ''
+ - name: Upgrade setuptools python2 module
+ pip:
+ name: setuptools<45
+ extra_args: --upgrade
+ state: present
+ when: python_suffix == ''
- - name: Install etcd3 python modules
- pip:
- name: "{{ etcd3_pip_module }}"
- extra_args: --only-binary grpcio
- state: present
+ - name: Install etcd3 python modules
+ pip:
+ name: "{{ etcd3_pip_module }}"
+ extra_args: --only-binary grpcio
+ state: present
- # Check if re-installing etcd3 is required
- - name: Check if etcd3ctl exists for reuse.
- shell: "ETCDCTL_API=3 {{ etcd3_path }}/etcdctl --endpoints=localhost:2379 get foo"
- args:
- executable: /bin/bash
- changed_when: false
- failed_when: false
- register: _testetcd3ctl
+ # Check if re-installing etcd3 is required
+ - name: Check if etcd3ctl exists for reuse.
+ shell: "ETCDCTL_API=3 {{ etcd3_path }}/etcdctl --endpoints=localhost:2379 get foo"
+ args:
+ executable: /bin/bash
+ changed_when: false
+ failed_when: false
+ register: _testetcd3ctl
+
+ - block:
+ # Installing etcd3
+ - name: If can't reuse, prepare download folder
+ file:
+ path: "{{ etcd3_download_location }}"
+ state: directory
+ register: _etcddownloadexists
+ when:
+ - _testetcd3ctl.rc != 0
+
+ - name: Delete download folder if already exists (to start clean)
+ file:
+ path: "{{ etcd3_download_location }}"
+ state: absent
+ when:
+ - _etcddownloadexists is not changed
+
+ - name: Recreate download folder if purged
+ file:
+ path: "{{ etcd3_download_location }}"
+ state: directory
+ when:
+ - _etcddownloadexists is not changed
+
+ - name: Download etcd3
+ unarchive:
+ src: "{{ etcd3_download_url }}"
+ dest: "{{ etcd3_download_location }}"
+ remote_src: true
+
+ # Running etcd3 and kill afterwards if it wasn't running before.
+ - name: Run etcd3
+ shell: "{{ etcd3_path }}/etcd &"
+ register: _etcd3run
+ changed_when: true
+
+ # - name: kill etcd3
+ # command: "pkill etcd"
- - block:
- # Installing etcd3
- - name: If can't reuse, prepare download folder
- file:
- path: "{{ etcd3_download_location }}"
- state: directory
- register: _etcddownloadexists
when:
- _testetcd3ctl.rc != 0
- - name: Delete download folder if already exists (to start clean)
- file:
- path: "{{ etcd3_download_location }}"
- state: absent
- when:
- - _etcddownloadexists is not changed
-
- - name: Recreate download folder if purged
- file:
- path: "{{ etcd3_download_location }}"
- state: directory
- when:
- - _etcddownloadexists is not changed
-
- - name: Download etcd3
- unarchive:
- src: "{{ etcd3_download_url }}"
- dest: "{{ etcd3_download_location }}"
- remote_src: true
-
- # Running etcd3 and kill afterwards if it wasn't running before.
- - name: Run etcd3
- shell: "{{ etcd3_path }}/etcd &"
- register: _etcd3run
- changed_when: true
-
-# - name: kill etcd3
-# command: "pkill etcd"
-
- when:
- - _testetcd3ctl.rc != 0
-
when:
- - ansible_distribution | lower ~ "-" ~ ansible_distribution_major_version | lower != 'centos-6'
+ - ansible_distribution | lower ~ "-" ~ ansible_distribution_major_version | lower != 'centos-6'
diff --git a/tests/integration/targets/setup_flatpak_remote/meta/main.yaml b/tests/integration/targets/setup_flatpak_remote/meta/main.yaml
index 1b3d5b8758..982de6eb03 100644
--- a/tests/integration/targets/setup_flatpak_remote/meta/main.yaml
+++ b/tests/integration/targets/setup_flatpak_remote/meta/main.yaml
@@ -4,4 +4,4 @@
# SPDX-License-Identifier: GPL-3.0-or-later
dependencies:
- - setup_remote_tmp_dir
+ - setup_remote_tmp_dir
diff --git a/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml b/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml
index 037784738a..e63cb379a0 100644
--- a/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml
+++ b/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml
@@ -13,20 +13,20 @@
ansible_distribution == 'Fedora' or
ansible_distribution == 'Ubuntu' and not ansible_distribution_major_version | int < 16
block:
- - name: Copy repo into place
- unarchive:
- src: repo.tar.xz
- dest: '{{ remote_tmp_dir }}'
- owner: root
- group: root
- mode: '0644'
- - name: Create deterministic link to temp directory
- file:
- state: link
- src: '{{ remote_tmp_dir }}/'
- path: /tmp/flatpak
- owner: root
- group: root
- mode: '0644'
- notify: remove temporary flatpak link
+ - name: Copy repo into place
+ unarchive:
+ src: repo.tar.xz
+ dest: '{{ remote_tmp_dir }}'
+ owner: root
+ group: root
+ mode: '0644'
+ - name: Create deterministic link to temp directory
+ file:
+ state: link
+ src: '{{ remote_tmp_dir }}/'
+ path: /tmp/flatpak
+ owner: root
+ group: root
+ mode: '0644'
+ notify: remove temporary flatpak link
become: true
diff --git a/tests/integration/targets/setup_java_keytool/tasks/main.yml b/tests/integration/targets/setup_java_keytool/tasks/main.yml
index 9f156425d8..56ba7a9581 100644
--- a/tests/integration/targets/setup_java_keytool/tasks/main.yml
+++ b/tests/integration/targets/setup_java_keytool/tasks/main.yml
@@ -22,6 +22,7 @@
files:
- '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
- '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
+ - '{{ ansible_distribution }}.yml'
- '{{ ansible_os_family }}.yml'
paths:
- '{{ role_path }}/vars'
diff --git a/tests/integration/targets/setup_java_keytool/vars/Fedora.yml b/tests/integration/targets/setup_java_keytool/vars/Fedora.yml
new file mode 100644
index 0000000000..5f77ea9bba
--- /dev/null
+++ b/tests/integration/targets/setup_java_keytool/vars/Fedora.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+keytool_package_names:
+ - java-21-openjdk-headless
diff --git a/tests/integration/targets/setup_openssl/tasks/main.yml b/tests/integration/targets/setup_openssl/tasks/main.yml
index b8e003710a..6490e65b41 100644
--- a/tests/integration/targets/setup_openssl/tasks/main.yml
+++ b/tests/integration/targets/setup_openssl/tasks/main.yml
@@ -13,13 +13,13 @@
vars:
search:
files:
- - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
- - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
- - '{{ ansible_distribution }}.yml'
- - '{{ ansible_os_family }}.yml'
- - default.yml
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
+ - '{{ ansible_distribution }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - default.yml
paths:
- - vars
+ - vars
- name: Install OpenSSL
become: true
@@ -29,24 +29,24 @@
- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
block:
- - name: Install cryptography (Python 3)
- become: true
- package:
- name: '{{ cryptography_package_name_python3 }}'
- when: not cryptography_from_pip and ansible_python_version is version('3.0', '>=')
+ - name: Install cryptography (Python 3)
+ become: true
+ package:
+ name: '{{ cryptography_package_name_python3 }}'
+ when: not cryptography_from_pip and ansible_python_version is version('3.0', '>=')
- - name: Install cryptography (Python 2)
- become: true
- package:
- name: '{{ cryptography_package_name }}'
- when: not cryptography_from_pip and ansible_python_version is version('3.0', '<')
+ - name: Install cryptography (Python 2)
+ become: true
+ package:
+ name: '{{ cryptography_package_name }}'
+ when: not cryptography_from_pip and ansible_python_version is version('3.0', '<')
- - name: Install cryptography (pip)
- become: true
- pip:
- name: cryptography>=3.3
- extra_args: "-c {{ remote_constraints }}"
- when: cryptography_from_pip
+ - name: Install cryptography (pip)
+ become: true
+ pip:
+ name: cryptography>=3.3
+ extra_args: "-c {{ remote_constraints }}"
+ when: cryptography_from_pip
- name: Install pyOpenSSL (Python 3)
become: true
diff --git a/tests/integration/targets/setup_openssl/vars/RedHat-10.yml b/tests/integration/targets/setup_openssl/vars/RedHat-10.yml
new file mode 100644
index 0000000000..ac9b3344eb
--- /dev/null
+++ b/tests/integration/targets/setup_openssl/vars/RedHat-10.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
+openssl_package_name: openssl
+cryptography_from_pip: false
diff --git a/tests/integration/targets/setup_postgresql_db/defaults/main.yml b/tests/integration/targets/setup_postgresql_db/defaults/main.yml
index 1a33ecafab..8480995959 100644
--- a/tests/integration/targets/setup_postgresql_db/defaults/main.yml
+++ b/tests/integration/targets/setup_postgresql_db/defaults/main.yml
@@ -6,8 +6,8 @@
postgresql_service: postgresql
postgresql_packages:
- - postgresql-server
- - python-psycopg2
+ - postgresql-server
+ - python-psycopg2
pg_user: postgres
pg_group: root
diff --git a/tests/integration/targets/setup_postgresql_db/tasks/main.yml b/tests/integration/targets/setup_postgresql_db/tasks/main.yml
index 99668ebc95..d6b8701e9a 100644
--- a/tests/integration/targets/setup_postgresql_db/tasks/main.yml
+++ b/tests/integration/targets/setup_postgresql_db/tasks/main.yml
@@ -35,12 +35,12 @@
vars:
params:
files:
- - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
- - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml'
- - '{{ ansible_os_family }}{{ python_suffix }}.yml'
- - default{{ python_suffix }}.yml
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}{{ python_suffix }}.yml'
+ - default{{ python_suffix }}.yml
paths:
- - '{{ role_path }}/vars'
+ - '{{ role_path }}/vars'
- name: make sure the dbus service is started under systemd
systemd:
@@ -79,8 +79,8 @@
ignore_errors: true
when: ansible_os_family == "Debian"
loop:
- - /etc/postgresql
- - /var/lib/postgresql
+ - /etc/postgresql
+ - /var/lib/postgresql
loop_control:
loop_var: loop_item
@@ -170,38 +170,38 @@
name: '{{ item }}'
state: present
with_items:
- - pt_BR
- - es_ES
+ - pt_BR
+ - es_ES
when: ansible_os_family == 'Debian'
- block:
- - name: Install langpacks (RHEL8)
- yum:
- name:
- - glibc-langpack-es
- - glibc-langpack-pt
- - glibc-all-langpacks
- state: present
- when: ansible_distribution_major_version is version('8', '>=')
+ - name: Install langpacks (RHEL8)
+ yum:
+ name:
+ - glibc-langpack-es
+ - glibc-langpack-pt
+ - glibc-all-langpacks
+ state: present
+ when: ansible_distribution_major_version is version('8', '>=')
- - name: Check if locales need to be generated (RedHat)
- shell: localedef --list-archive | grep -a -q '^{{ locale }}$'
- register: locale_present
- ignore_errors: true
- with_items:
- - es_ES
- - pt_BR
- loop_control:
- loop_var: locale
+ - name: Check if locales need to be generated (RedHat)
+ shell: localedef --list-archive | grep -a -q '^{{ locale }}$'
+ register: locale_present
+ ignore_errors: true
+ with_items:
+ - es_ES
+ - pt_BR
+ loop_control:
+ loop_var: locale
- - name: Reinstall internationalization files
- shell: yum -y reinstall glibc-common || yum -y install glibc-common
- when: locale_present is failed
+ - name: Reinstall internationalization files
+ shell: yum -y reinstall glibc-common || yum -y install glibc-common
+ when: locale_present is failed
- - name: Generate locale (RedHat)
- command: localedef -f ISO-8859-1 -i {{ item.locale }} {{ item.locale }}
- when: item is failed
- with_items: '{{ locale_present.results }}'
+ - name: Generate locale (RedHat)
+ command: localedef -f ISO-8859-1 -i {{ item.locale }} {{ item.locale }}
+ when: item is failed
+ with_items: '{{ locale_present.results }}'
when: ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora'
- name: Install glibc langpacks (Fedora >= 24)
@@ -209,8 +209,8 @@
name: '{{ item }}'
state: latest
with_items:
- - glibc-langpack-es
- - glibc-langpack-pt
+ - glibc-langpack-es
+ - glibc-langpack-pt
when: ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('24', '>=')
- name: enable postgresql service (FreeBSD)
@@ -257,9 +257,9 @@
dest: /usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}
mode: '0444'
with_items:
- - dummy--1.0.sql
- - dummy--2.0.sql
- - dummy--3.0.sql
+ - dummy--1.0.sql
+ - dummy--2.0.sql
+ - dummy--3.0.sql
when: ansible_os_family == 'Debian'
- name: add update paths
@@ -268,8 +268,8 @@
mode: '0444'
state: touch
with_items:
- - dummy--1.0--2.0.sql
- - dummy--2.0--3.0.sql
+ - dummy--1.0--2.0.sql
+ - dummy--2.0--3.0.sql
when: ansible_os_family == 'Debian'
- name: Get PostgreSQL version
diff --git a/tests/integration/targets/setup_redis_replication/defaults/main.yml b/tests/integration/targets/setup_redis_replication/defaults/main.yml
index 301c5a6e84..cc2d632b85 100644
--- a/tests/integration/targets/setup_redis_replication/defaults/main.yml
+++ b/tests/integration/targets/setup_redis_replication/defaults/main.yml
@@ -6,25 +6,25 @@
# General
redis_packages:
Alpine:
- - redis
+ - redis
Archlinux:
- - redis
+ - valkey
Debian:
- - redis-server
+ - redis-server
Ubuntu:
- - redis-server
+ - redis-server
openSUSE Leap:
- - redis
+ - redis
Fedora:
- - redis
+ - redis
CentOS:
- - redis
+ - redis
FreeBSD:
- - redis
+ - redis
redis_user:
Alpine: redis
- Archlinux: redis
+ Archlinux: valkey
Debian: redis
Ubuntu: redis
openSUSE Leap: redis
diff --git a/tests/integration/targets/setup_redis_replication/handlers/main.yml b/tests/integration/targets/setup_redis_replication/handlers/main.yml
index a0595cbe30..46c7571da1 100644
--- a/tests/integration/targets/setup_redis_replication/handlers/main.yml
+++ b/tests/integration/targets/setup_redis_replication/handlers/main.yml
@@ -28,12 +28,12 @@
path: "{{ item }}"
state: absent
loop:
- - "{{ master_conf }}"
- - "{{ master_datadir }}"
- - "{{ master_logdir }}"
- - /var/run/redis_{{ master_port }}.pid
- - "{{ replica_conf }}"
- - "{{ replica_datadir }}"
- - "{{ replica_logdir }}"
- - /var/run/redis_{{ replica_port }}.pid
+ - "{{ master_conf }}"
+ - "{{ master_datadir }}"
+ - "{{ master_logdir }}"
+ - /var/run/redis_{{ master_port }}.pid
+ - "{{ replica_conf }}"
+ - "{{ replica_datadir }}"
+ - "{{ replica_logdir }}"
+ - /var/run/redis_{{ replica_port }}.pid
listen: cleanup redis
diff --git a/tests/integration/targets/setup_redis_replication/meta/main.yml b/tests/integration/targets/setup_redis_replication/meta/main.yml
index db2617f4ce..2d6cafb56f 100644
--- a/tests/integration/targets/setup_redis_replication/meta/main.yml
+++ b/tests/integration/targets/setup_redis_replication/meta/main.yml
@@ -4,5 +4,5 @@
# SPDX-License-Identifier: GPL-3.0-or-later
dependencies:
-- setup_pkg_mgr
-- setup_remote_constraints
+ - setup_pkg_mgr
+ - setup_remote_constraints
diff --git a/tests/integration/targets/setup_redis_replication/tasks/main.yml b/tests/integration/targets/setup_redis_replication/tasks/main.yml
index 076a473594..92ac4fe750 100644
--- a/tests/integration/targets/setup_redis_replication/tasks/main.yml
+++ b/tests/integration/targets/setup_redis_replication/tasks/main.yml
@@ -9,4 +9,4 @@
- import_tasks: setup_redis_cluster.yml
when:
- - ansible_distribution in ['CentOS', 'Fedora', 'FreeBSD', 'openSUSE Leap', 'Ubuntu', 'Debian', 'Archlinux', 'Alpine']
+ - ansible_distribution in ['CentOS', 'Fedora', 'FreeBSD', 'openSUSE Leap', 'Ubuntu', 'Debian', 'Archlinux', 'Alpine']
diff --git a/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml b/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml
index 72f1703832..3c9c62ba00 100644
--- a/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml
+++ b/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml
@@ -21,16 +21,16 @@
notify: cleanup redis
- name: Create redis directories
- file:
+ file:
path: "{{ item }}"
- state: directory
+ state: directory
owner: "{{ redis_user[ansible_distribution] }}"
group: "{{ redis_user[ansible_distribution] }}"
loop:
- - "{{ master_datadir }}"
- - "{{ master_logdir }}"
- - "{{ replica_datadir }}"
- - "{{ replica_logdir }}"
+ - "{{ master_datadir }}"
+ - "{{ master_logdir }}"
+ - "{{ replica_datadir }}"
+ - "{{ replica_logdir }}"
- name: Create redis configs
copy:
@@ -44,14 +44,14 @@
requirepass {{ redis_password }}
masterauth {{ redis_password }}
loop:
- - file: "{{ master_conf }}"
- port: "{{ master_port }}"
- logdir: "{{ master_logdir }}"
- datadir: "{{ master_datadir }}"
- - file: "{{ replica_conf }}"
- port: "{{ replica_port }}"
- logdir: "{{ replica_logdir }}"
- datadir: "{{ replica_datadir }}"
+ - file: "{{ master_conf }}"
+ port: "{{ master_port }}"
+ logdir: "{{ master_logdir }}"
+ datadir: "{{ master_datadir }}"
+ - file: "{{ replica_conf }}"
+ port: "{{ replica_port }}"
+ logdir: "{{ replica_logdir }}"
+ datadir: "{{ replica_datadir }}"
- name: Start redis master
ansible.builtin.command: "{{ redis_bin[ansible_distribution] }} {{ master_conf }}"
diff --git a/tests/integration/targets/setup_rundeck/defaults/main.yml b/tests/integration/targets/setup_rundeck/defaults/main.yml
index c842901c0f..1c9858ad25 100644
--- a/tests/integration/targets/setup_rundeck/defaults/main.yml
+++ b/tests/integration/targets/setup_rundeck/defaults/main.yml
@@ -3,5 +3,13 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-rundeck_war_url: https://packagecloud.io/pagerduty/rundeck/packages/java/org.rundeck/rundeck-3.4.4-20210920.war/artifacts/rundeck-3.4.4-20210920.war/download
-rundeck_cli_url: https://github.com/rundeck/rundeck-cli/releases/download/v1.3.10/rundeck-cli-1.3.10-all.jar
+rundeck_version: 5.11.1-20250415
+rundeck_cli_version: "2.0.8"
+
+rundeck_war_url:
+ "https://packagecloud.io/pagerduty/rundeck/packages/java/org.rundeck/\
+ rundeck-{{ rundeck_version }}.war/artifacts/rundeck-{{ rundeck_version }}.war/download"
+
+rundeck_cli_url:
+ "https://github.com/rundeck/rundeck-cli/releases/download/\
+ v{{ rundeck_cli_version }}/rundeck-cli-{{ rundeck_cli_version }}-all.jar"
diff --git a/tests/integration/targets/setup_rundeck/vars/RedHat.yml b/tests/integration/targets/setup_rundeck/vars/RedHat.yml
index 314f0ef415..bba076aecd 100644
--- a/tests/integration/targets/setup_rundeck/vars/RedHat.yml
+++ b/tests/integration/targets/setup_rundeck/vars/RedHat.yml
@@ -3,4 +3,4 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
-openjdk_pkg: java-1.8.0-openjdk
+openjdk_pkg: java-11-openjdk-headless
diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-10.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-10.yml
new file mode 120000
index 0000000000..0b06951496
--- /dev/null
+++ b/tests/integration/targets/setup_snap/tasks/D-RedHat-10.yml
@@ -0,0 +1 @@
+nothing.yml
\ No newline at end of file
diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml
deleted file mode 100644
index 5bbfaff128..0000000000
--- a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# Do nothing
diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml
new file mode 120000
index 0000000000..0b06951496
--- /dev/null
+++ b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.3.yml
@@ -0,0 +1 @@
+nothing.yml
\ No newline at end of file
diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml
deleted file mode 100644
index 5bbfaff128..0000000000
--- a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# Do nothing
diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml
new file mode 120000
index 0000000000..0b06951496
--- /dev/null
+++ b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.5.yml
@@ -0,0 +1 @@
+nothing.yml
\ No newline at end of file
diff --git a/tests/integration/targets/setup_snap/tasks/D-RedHat-9.6.yml b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.6.yml
new file mode 120000
index 0000000000..0b06951496
--- /dev/null
+++ b/tests/integration/targets/setup_snap/tasks/D-RedHat-9.6.yml
@@ -0,0 +1 @@
+nothing.yml
\ No newline at end of file
diff --git a/tests/integration/targets/setup_wildfly_server/handlers/main.yml b/tests/integration/targets/setup_wildfly_server/handlers/main.yml
index 1383b15753..38522db30a 100644
--- a/tests/integration/targets/setup_wildfly_server/handlers/main.yml
+++ b/tests/integration/targets/setup_wildfly_server/handlers/main.yml
@@ -14,5 +14,5 @@
path: '{{ item }}'
state: absent
loop:
- - '{{ wf_service_file_path }}'
- - '{{ default_deploy_root }}'
+ - '{{ wf_service_file_path }}'
+ - '{{ default_deploy_root }}'
diff --git a/tests/integration/targets/setup_wildfly_server/meta/main.yml b/tests/integration/targets/setup_wildfly_server/meta/main.yml
index 2d29ebb672..ca1915e05c 100644
--- a/tests/integration/targets/setup_wildfly_server/meta/main.yml
+++ b/tests/integration/targets/setup_wildfly_server/meta/main.yml
@@ -4,5 +4,5 @@
# SPDX-License-Identifier: GPL-3.0-or-later
dependencies:
-- setup_pkg_mgr
-- setup_remote_tmp_dir
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/tests/integration/targets/shutdown/tasks/main.yml b/tests/integration/targets/shutdown/tasks/main.yml
index 2c9bc6bd6d..2903248407 100644
--- a/tests/integration/targets/shutdown/tasks/main.yml
+++ b/tests/integration/targets/shutdown/tasks/main.yml
@@ -44,7 +44,7 @@
- name: Verify shutdown command is present in Alpine except systemd
assert:
that: '"poweroff" in shutdown_result["shutdown_command"]'
- when:
+ when:
- "ansible_os_family == 'Alpine'"
- '"systemctl" not in shutdown_result["shutdown_command"]'
@@ -52,7 +52,7 @@
- name: Verify shutdown command is present in VMKernel except systemd
assert:
that: '"halt" in shutdown_result["shutdown_command"]'
- when:
+ when:
- "ansible_system == 'VMKernel'"
- '"systemctl" not in shutdown_result["shutdown_command"]'
@@ -111,7 +111,7 @@
community.general.shutdown:
register: shutdown_result
check_mode: true
- when:
+ when:
- "(ansible_distribution == 'Ubuntu' and ansible_distribution_major_version is version('18', '>=')) or (ansible_distribution == 'Debian')"
- '"systemd-sysv" not in ansible_facts.packages'
diff --git a/tests/integration/targets/snap_alias/tasks/test.yml b/tests/integration/targets/snap_alias/tasks/test.yml
index 50e6e33b49..87a7419358 100644
--- a/tests/integration/targets/snap_alias/tasks/test.yml
+++ b/tests/integration/targets/snap_alias/tasks/test.yml
@@ -43,12 +43,12 @@
- name: assert single alias
assert:
that:
- - alias_single_0 is changed
- - alias_single_1 is changed
- - alias_single_2 is not changed
- - alias_single_3 is not changed
- - 'alias_single_1.snap_aliases["hello-world"] == ["hw"]'
- - 'alias_single_3.snap_aliases["hello-world"] == ["hw"]'
+ - alias_single_0 is changed
+ - alias_single_1 is changed
+ - alias_single_2 is not changed
+ - alias_single_3 is not changed
+ - 'alias_single_1.snap_aliases["hello-world"] == ["hw"]'
+ - 'alias_single_3.snap_aliases["hello-world"] == ["hw"]'
- name: Create multiple aliases (check mode)
community.general.snap_alias:
@@ -79,12 +79,12 @@
- name: assert multi alias
assert:
that:
- - alias_multi_0 is changed
- - alias_multi_1 is changed
- - alias_multi_2 is not changed
- - alias_multi_3 is not changed
- - 'alias_multi_1.snap_aliases["hello-world"] == ["hw", "hw2", "hw3"]'
- - 'alias_multi_3.snap_aliases["hello-world"] == ["hw", "hw2", "hw3"]'
+ - alias_multi_0 is changed
+ - alias_multi_1 is changed
+ - alias_multi_2 is not changed
+ - alias_multi_3 is not changed
+ - 'alias_multi_1.snap_aliases["hello-world"] == ["hw", "hw2", "hw3"]'
+ - 'alias_multi_3.snap_aliases["hello-world"] == ["hw", "hw2", "hw3"]'
- name: Remove one specific alias (check mode)
community.general.snap_alias:
@@ -115,12 +115,12 @@
- name: assert remove alias
assert:
that:
- - alias_remove_0 is changed
- - alias_remove_1 is changed
- - alias_remove_2 is not changed
- - alias_remove_3 is not changed
- - 'alias_remove_1.snap_aliases["hello-world"] == ["hw2", "hw3"]'
- - 'alias_remove_3.snap_aliases["hello-world"] == ["hw2", "hw3"]'
+ - alias_remove_0 is changed
+ - alias_remove_1 is changed
+ - alias_remove_2 is not changed
+ - alias_remove_3 is not changed
+ - 'alias_remove_1.snap_aliases["hello-world"] == ["hw2", "hw3"]'
+ - 'alias_remove_3.snap_aliases["hello-world"] == ["hw2", "hw3"]'
- name: Remove all aliases for snap (check mode)
community.general.snap_alias:
@@ -151,9 +151,9 @@
- name: assert remove_all alias
assert:
that:
- - alias_remove_all_0 is changed
- - alias_remove_all_1 is changed
- - alias_remove_all_2 is not changed
- - alias_remove_all_3 is not changed
- - 'alias_remove_all_1.snap_aliases["hello-world"] == []'
- - 'alias_remove_all_3.snap_aliases["hello-world"] == []'
+ - alias_remove_all_0 is changed
+ - alias_remove_all_1 is changed
+ - alias_remove_all_2 is not changed
+ - alias_remove_all_3 is not changed
+ - 'alias_remove_all_1.snap_aliases["hello-world"] == []'
+ - 'alias_remove_all_3.snap_aliases["hello-world"] == []'
diff --git a/tests/integration/targets/spectrum_model_attrs/tasks/main.yml b/tests/integration/targets/spectrum_model_attrs/tasks/main.yml
index 42e53d7d7d..ecc1eb6005 100644
--- a/tests/integration/targets/spectrum_model_attrs/tasks/main.yml
+++ b/tests/integration/targets/spectrum_model_attrs/tasks/main.yml
@@ -14,65 +14,65 @@
or oneclick_url is not defined
- block:
- - name: "001: Enforce maintenance mode for {{ model_name }} with a note about why [check_mode test]"
- spectrum_model_attrs: &mm_enabled_args
- url: "{{ oneclick_url }}"
- username: "{{ oneclick_username }}"
- password: "{{ oneclick_password }}"
- name: "{{ model_name }}"
- type: "{{ model_type }}"
- validate_certs: false
- attributes:
- - name: "isManaged"
- value: "false"
- - name: "Notes"
- value: "{{ note_mm_enabled }}"
- check_mode: true
- register: mm_enabled_check_mode
+ - name: "001: Enforce maintenance mode for {{ model_name }} with a note about why [check_mode test]"
+ spectrum_model_attrs: &mm_enabled_args
+ url: "{{ oneclick_url }}"
+ username: "{{ oneclick_username }}"
+ password: "{{ oneclick_password }}"
+ name: "{{ model_name }}"
+ type: "{{ model_type }}"
+ validate_certs: false
+ attributes:
+ - name: "isManaged"
+ value: "false"
+ - name: "Notes"
+ value: "{{ note_mm_enabled }}"
+ check_mode: true
+ register: mm_enabled_check_mode
- - name: "001: assert that changes were made"
- assert:
- that:
- - mm_enabled_check_mode is changed
+ - name: "001: assert that changes were made"
+ assert:
+ that:
+ - mm_enabled_check_mode is changed
- - name: "001: assert that changed_attrs is properly set"
- assert:
- that:
- - mm_enabled_check_mode.changed_attrs.Notes == note_mm_enabled
- - mm_enabled_check_mode.changed_attrs.isManaged == "false"
+ - name: "001: assert that changed_attrs is properly set"
+ assert:
+ that:
+ - mm_enabled_check_mode.changed_attrs.Notes == note_mm_enabled
+ - mm_enabled_check_mode.changed_attrs.isManaged == "false"
- - name: "002: Enforce maintenance mode for {{ model_name }} with a note about why"
- spectrum_model_attrs:
- <<: *mm_enabled_args
- register: mm_enabled
- check_mode: false
+ - name: "002: Enforce maintenance mode for {{ model_name }} with a note about why"
+ spectrum_model_attrs:
+ <<: *mm_enabled_args
+ register: mm_enabled
+ check_mode: false
- - name: "002: assert that changes were made"
- assert:
- that:
- - mm_enabled is changed
+ - name: "002: assert that changes were made"
+ assert:
+ that:
+ - mm_enabled is changed
- - name: "002: assert that changed_attrs is properly set"
- assert:
- that:
- - mm_enabled.changed_attrs.Notes == note_mm_enabled
- - mm_enabled.changed_attrs.isManaged == "false"
+ - name: "002: assert that changed_attrs is properly set"
+ assert:
+ that:
+ - mm_enabled.changed_attrs.Notes == note_mm_enabled
+ - mm_enabled.changed_attrs.isManaged == "false"
- - name: "003: Enforce maintenance mode for {{ model_name }} with a note about why [idempontence test]"
- spectrum_model_attrs:
- <<: *mm_enabled_args
- register: mm_enabled_idp
- check_mode: false
+ - name: "003: Enforce maintenance mode for {{ model_name }} with a note about why [idempontence test]"
+ spectrum_model_attrs:
+ <<: *mm_enabled_args
+ register: mm_enabled_idp
+ check_mode: false
- - name: "003: assert that changes were not made"
- assert:
- that:
- - mm_enabled_idp is not changed
+ - name: "003: assert that changes were not made"
+ assert:
+ that:
+ - mm_enabled_idp is not changed
- - name: "003: assert that changed_attrs is not set"
- assert:
- that:
- - mm_enabled_idp.changed_attrs == {}
+ - name: "003: assert that changed_attrs is not set"
+ assert:
+ that:
+ - mm_enabled_idp.changed_attrs == {}
vars:
note_mm_enabled: "MM set via CO #1234 by OJ Simpson"
diff --git a/tests/integration/targets/ssh_config/tasks/options.yml b/tests/integration/targets/ssh_config/tasks/options.yml
index d342943975..2f93b952bd 100644
--- a/tests/integration/targets/ssh_config/tasks/options.yml
+++ b/tests/integration/targets/ssh_config/tasks/options.yml
@@ -20,7 +20,7 @@
identities_only: true
controlmaster: "auto"
controlpath: "~/.ssh/sockets/%r@%h-%p"
- controlpersist: yes
+ controlpersist: "yes"
dynamicforward: '10080'
other_options:
serveraliveinterval: '30'
@@ -57,7 +57,7 @@
identities_only: true
controlmaster: "auto"
controlpath: "~/.ssh/sockets/%r@%h-%p"
- controlpersist: yes
+ controlpersist: "yes"
dynamicforward: '10080'
other_options:
serveraliveinterval: '30'
@@ -83,7 +83,7 @@
identities_only: true
controlmaster: "auto"
controlpath: "~/.ssh/sockets/%r@%h-%p"
- controlpersist: yes
+ controlpersist: "yes"
dynamicforward: '10080'
other_options:
serveraliveinterval: '30'
@@ -126,7 +126,7 @@
add_keys_to_agent: false
host_key_algorithms: "+ssh-ed25519"
identities_only: false
- controlmaster: no
+ controlmaster: "no"
controlpath: "~/.ssh/new-sockets/%r@%h-%p"
controlpersist: "600"
dynamicforward: '11080'
@@ -154,7 +154,7 @@
add_keys_to_agent: false
host_key_algorithms: "+ssh-ed25519"
identities_only: false
- controlmaster: no
+ controlmaster: "no"
controlpath: "~/.ssh/new-sockets/%r@%h-%p"
controlpersist: "600"
dynamicforward: '11080'
@@ -298,7 +298,7 @@
identities_only: true
controlmaster: "auto"
controlpath: "~/.ssh/sockets/%r@%h-%p"
- controlpersist: yes
+ controlpersist: "yes"
dynamicforward: '10080'
other_options:
serveraliveinterval: '30'
@@ -335,7 +335,7 @@
identities_only: true
controlmaster: "auto"
controlpath: "~/.ssh/sockets/%r@%h-%p"
- controlpersist: yes
+ controlpersist: "yes"
dynamicforward: '10080'
other_options:
serveraliveinterval: '30'
@@ -361,7 +361,7 @@
identities_only: true
controlmaster: "auto"
controlpath: "~/.ssh/sockets/%r@%h-%p"
- controlpersist: yes
+ controlpersist: "yes"
dynamicforward: '10080'
other_options:
serveraliveinterval: '30'
@@ -404,7 +404,7 @@
add_keys_to_agent: false
host_key_algorithms: "+ssh-ed25519"
identities_only: false
- controlmaster: no
+ controlmaster: "no"
controlpath: "~/.ssh/new-sockets/%r@%h-%p"
controlpersist: "600"
dynamicforward: '11080'
@@ -432,7 +432,7 @@
add_keys_to_agent: false
host_key_algorithms: "+ssh-ed25519"
identities_only: false
- controlmaster: no
+ controlmaster: "no"
controlpath: "~/.ssh/new-sockets/%r@%h-%p"
controlpersist: "600"
dynamicforward: '11080'
diff --git a/tests/integration/targets/supervisorctl/aliases b/tests/integration/targets/supervisorctl/aliases
index 77ed76c52d..e28f8d0919 100644
--- a/tests/integration/targets/supervisorctl/aliases
+++ b/tests/integration/targets/supervisorctl/aliases
@@ -7,3 +7,4 @@ destructive
skip/aix
skip/rhel # TODO executables are installed in /usr/local/bin, which isn't part of $PATH
skip/macos # TODO executables are installed in /Library/Frameworks/Python.framework/Versions/3.11/bin, which isn't part of $PATH
+unstable # TODO fix!
diff --git a/tests/integration/targets/supervisorctl/tasks/main.yml b/tests/integration/targets/supervisorctl/tasks/main.yml
index 24b505d9ad..dd47971a31 100644
--- a/tests/integration/targets/supervisorctl/tasks/main.yml
+++ b/tests/integration/targets/supervisorctl/tasks/main.yml
@@ -15,45 +15,45 @@
- ansible_version.minor != 16 or ansible_distribution not in ['CentOS', 'openSUSE Leap']
block:
- block:
- - tempfile:
- state: directory
- suffix: supervisorctl-tests
- register: supervisord_sock_path
+ - tempfile:
+ state: directory
+ suffix: supervisorctl-tests
+ register: supervisord_sock_path
- - command: 'echo {{ remote_tmp_dir }}'
- register: echo
- - set_fact:
- remote_dir: '{{ echo.stdout }}'
+ - command: 'echo {{ remote_tmp_dir }}'
+ register: echo
+ - set_fact:
+ remote_dir: '{{ echo.stdout }}'
- - include_vars: '{{ item }}'
- with_first_found:
- - files:
- - '{{ ansible_distribution }}.yml'
- - '{{ ansible_os_family }}.yml'
- - 'defaults.yml'
+ - include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_distribution }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - 'defaults.yml'
- - include_tasks: '{{ item }}'
- with_first_found:
- - files:
- - 'install_{{ ansible_distribution }}.yml' # CentOS
- - 'install_{{ ansible_os_family }}.yml' # RedHat
- - 'install_{{ ansible_system }}.yml' # Linux
+ - include_tasks: '{{ item }}'
+ with_first_found:
+ - files:
+ - 'install_{{ ansible_distribution }}.yml' # CentOS
+ - 'install_{{ ansible_os_family }}.yml' # RedHat
+ - 'install_{{ ansible_system }}.yml' # Linux
- - include_tasks: test.yml
- with_items:
+ - include_tasks: test.yml
+ with_items:
- { username: '', password: '' }
- { username: 'testétest', password: 'passéword' } # non-ASCII credentials
- loop_control:
- loop_var: credentials
+ loop_control:
+ loop_var: credentials
always:
- - include_tasks: '{{ item }}'
- with_first_found:
- - files:
- - 'uninstall_{{ ansible_distribution }}.yml' # CentOS
- - 'uninstall_{{ ansible_os_family }}.yml' # RedHat
- - 'uninstall_{{ ansible_system }}.yml' # Linux
+ - include_tasks: '{{ item }}'
+ with_first_found:
+ - files:
+ - 'uninstall_{{ ansible_distribution }}.yml' # CentOS
+ - 'uninstall_{{ ansible_os_family }}.yml' # RedHat
+ - 'uninstall_{{ ansible_system }}.yml' # Linux
- - file:
- path: '{{ supervisord_sock_path.path }}'
- state: absent
+ - file:
+ path: '{{ supervisord_sock_path.path }}'
+ state: absent
diff --git a/tests/integration/targets/sysrc/files/10394.conf b/tests/integration/targets/sysrc/files/10394.conf
new file mode 100644
index 0000000000..fe0bc5b145
--- /dev/null
+++ b/tests/integration/targets/sysrc/files/10394.conf
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+k1="v1"
+jail_list="
+ foo
+ bar"
\ No newline at end of file
diff --git a/tests/integration/targets/sysrc/tasks/main.yml b/tests/integration/targets/sysrc/tasks/main.yml
index d0d1957a08..f1135d488f 100644
--- a/tests/integration/targets/sysrc/tasks/main.yml
+++ b/tests/integration/targets/sysrc/tasks/main.yml
@@ -141,12 +141,12 @@
#
# NOTE: currently fails with FreeBSD 12 with minor version less than 4
# NOTE: currently fails with FreeBSD 13 with minor version less than 4
- # NOTE: currently fails with FreeBSD 14 with minor version less than 1
+ # NOTE: currently fails with FreeBSD 14 with minor version less than 2
#
when: >-
ansible_distribution_version is version('12.4', '>=') and ansible_distribution_version is version('13', '<')
or ansible_distribution_version is version('13.4', '>=') and ansible_distribution_version is version('14', '<')
- or ansible_distribution_version is version('14.1', '>=')
+ or ansible_distribution_version is version('14.2', '>=')
block:
- name: Setup testjail
include_tasks: setup-testjail.yml
@@ -333,13 +333,115 @@
- not sysrc_value_absent_idempotent.changed
- "'sysrc_delim=\"t1,t2\"' in sysrc_delim_content.stdout_lines"
- "'sysrc_delim_delete' not in sysrc_delim_content.stdout_lines"
+
+ ##
+ ## sysrc - value contains equals sign
+ ##
+ - name: Value contains equals sign
+ vars:
+ value_1: "-u spamd -x --allow-tell --max-spare=1 --listen=*"
+ value_2: "-u spamd -x --allow-tell --max-spare=1 --listen=localhost"
+ block:
+
+ - name: Add spamd_flags
+ sysrc:
+ name: spamd_flags
+ value: "{{ value_1 }}"
+ register: sysrc_equals_sign_1
+
+ - name: Change spamd_flags
+ sysrc:
+ name: spamd_flags
+ value: "{{ value_2 }}"
+ register: sysrc_equals_sign_2
+
+ - name: Get file content
+ command: sysrc -a
+ register: sysrc_content
+
+ - name: Ensure sysrc did as intended with values that contains equals sign
+ vars:
+ conf: "{{ sysrc_content.stdout | from_yaml }}"
+ assert:
+ that:
+ - "value_1 == sysrc_equals_sign_1.value"
+ - sysrc_equals_sign_2.changed
+ - "value_2 == sysrc_equals_sign_2.value"
+ - "value_2 == conf.spamd_flags"
+
+ ##
+ ## sysrc - #10004 state=absent when using default settings will report `changed=true`
+ ##
+ - name: Test that a key from /etc/defaults/rc.conf is not used to mark changed
+ sysrc:
+ name: dumpdev
+ state: absent
+ path: /tmp/10004.conf
+ register: sysrc_10004_absent
+ failed_when: sysrc_10004_absent.changed
+
+ - name: Test that a delimited key from /etc/defaults/rc.conf is not used to mark changed
+ sysrc:
+ name: rc_conf_files
+ state: value_absent
+ path: /tmp/10004.conf
+ register: sysrc_10004_value_absent
+ failed_when: sysrc_10004_value_absent.changed
+
+ - name: Test that a key from /etc/defaults/rc.conf is not used to mark changed without a path
+ sysrc:
+ name: static_routes
+ state: absent
+ register: sysrc_absent_default
+ failed_when: sysrc_absent_default.changed
+
+ ##
+ ## sysrc - #10394 Ensure that files with multi-line values work
+ ##
+ - name: Copy 10394.conf
+ copy:
+ src: 10394.conf
+ dest: /tmp/10394.conf
+
+ - name: Change value for k1
+ sysrc:
+ name: k1
+ value: v2
+ path: /tmp/10394.conf
+ register: sysrc_10394_changed
+
+ - name: Get file content
+ shell: "cat /tmp/10394.conf"
+ register: sysrc_10394_content
+
+ - name: Ensure sysrc changed k1 from v1 to v2
+ assert:
+ that:
+ - sysrc_10394_changed.changed
+ - >
+ 'k1="v2"' in sysrc_10394_content.stdout_lines
+
+ ##
+ ## sysrc - additional tests
+ ##
+ - name: Ensure failure on OID style name since sysrc does not support them
+ sysrc:
+ name: not.valid.var
+ value: test
+ register: sysrc_name_check
+ failed_when:
+ - sysrc_name_check is not failed
+ - >
+ 'Name may only contain alpha-numeric and underscore characters' != sysrc_name_check.msg
+
always:
+
- name: Restore /etc/rc.conf
copy:
- content: "{{ cached_etc_rcconf_content }}"
+ content: "{{ cached_etc_rcconf_content.stdout }}"
dest: /etc/rc.conf
- name: Restore /boot/loader.conf
copy:
- content: "{{ cached_boot_loaderconf_content }}"
+ content: "{{ cached_boot_loaderconf_content.stdout }}"
dest: /boot/loader.conf
diff --git a/tests/integration/targets/systemd_info/tasks/main.yml b/tests/integration/targets/systemd_info/tasks/main.yml
index dabc5fae9a..e41816195b 100644
--- a/tests/integration/targets/systemd_info/tasks/main.yml
+++ b/tests/integration/targets/systemd_info/tasks/main.yml
@@ -14,13 +14,13 @@
- name: Test systemd_facts
block:
- - name: Run tests
- import_tasks: tests.yml
+ - name: Run tests
+ import_tasks: tests.yml
when: >
- (ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] and ansible_distribution_major_version is version('7', '>=')) or
- ansible_distribution == 'Fedora' or
- (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('15.04', '>=')) or
- (ansible_distribution == 'Debian' and ansible_distribution_version is version('8', '>=')) or
+ (ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux'] and ansible_distribution_major_version is version('7', '>=')) or
+ ansible_distribution == 'Fedora' or
+ (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('15.04', '>=')) or
+ (ansible_distribution == 'Debian' and ansible_distribution_version is version('8', '>=')) or
ansible_os_family == 'Suse' or
ansible_distribution == 'Archlinux'
\ No newline at end of file
diff --git a/tests/integration/targets/terraform/tasks/complex_variables.yml b/tests/integration/targets/terraform/tasks/complex_variables.yml
index 9788a3eed1..81c708e34d 100644
--- a/tests/integration/targets/terraform/tasks/complex_variables.yml
+++ b/tests/integration/targets/terraform/tasks/complex_variables.yml
@@ -7,7 +7,7 @@
ansible.builtin.file:
path: "{{ terraform_project_dir }}/complex_vars"
state: directory
- mode: 0755
+ mode: "0755"
- name: copy terraform files to work space
ansible.builtin.copy:
@@ -49,10 +49,10 @@
one
two
list_of_lists:
- - [ 1 ]
- - [ 11, 12, 13 ]
- - [ 2 ]
- - [ 3 ]
+ - [1]
+ - [11, 12, 13]
+ - [2]
+ - [3]
state: present
register: terraform_init_result
diff --git a/tests/integration/targets/terraform/tasks/main.yml b/tests/integration/targets/terraform/tasks/main.yml
index d04757d8e4..513c3ef225 100644
--- a/tests/integration/targets/terraform/tasks/main.yml
+++ b/tests/integration/targets/terraform/tasks/main.yml
@@ -8,20 +8,20 @@
- name: Check for existing Terraform in path
block:
- - name: Check if terraform is present in path
- ansible.builtin.command: "command -v terraform"
- register: terraform_binary_path
- ignore_errors: true
+ - name: Check if terraform is present in path
+ ansible.builtin.command: "command -v terraform"
+ register: terraform_binary_path
+ ignore_errors: true
- - name: Check Terraform version
- ansible.builtin.command: terraform version
- register: terraform_version_output
- when: terraform_binary_path.rc == 0
+ - name: Check Terraform version
+ ansible.builtin.command: terraform version
+ register: terraform_version_output
+ when: terraform_binary_path.rc == 0
- - name: Set terraform version
- ansible.builtin.set_fact:
- terraform_version_installed: "{{ terraform_version_output.stdout | regex_search('(?!Terraform.*v)([0-9]+\\.[0-9]+\\.[0-9]+)') }}"
- when: terraform_version_output.changed
+ - name: Set terraform version
+ ansible.builtin.set_fact:
+ terraform_version_installed: "{{ terraform_version_output.stdout | regex_search('(?!Terraform.*v)([0-9]+\\.[0-9]+\\.[0-9]+)') }}"
+ when: terraform_version_output.changed
# This block handles the tasks of installing the Terraform binary. This happens if there is no existing
# terraform in $PATH OR version does not match `terraform_version`.
@@ -29,22 +29,22 @@
- name: Execute Terraform install tasks
block:
- - name: Install Terraform
- ansible.builtin.debug:
- msg: "Installing terraform {{ terraform_version }}, found: {{ terraform_version_installed | default('no terraform binary found') }}."
+ - name: Install Terraform
+ ansible.builtin.debug:
+ msg: "Installing terraform {{ terraform_version }}, found: {{ terraform_version_installed | default('no terraform binary found') }}."
- - name: Ensure unzip is present
- ansible.builtin.package:
- name: unzip
- state: present
+ - name: Ensure unzip is present
+ ansible.builtin.package:
+ name: unzip
+ state: present
- - name: Install Terraform binary
- ansible.builtin.unarchive:
- src: "{{ terraform_url }}"
- dest: "{{ remote_tmp_dir }}"
- mode: 0755
- remote_src: true
- validate_certs: "{{ validate_certs }}"
+ - name: Install Terraform binary
+ ansible.builtin.unarchive:
+ src: "{{ terraform_url }}"
+ dest: "{{ remote_tmp_dir }}"
+ mode: "0755"
+ remote_src: true
+ validate_certs: "{{ validate_certs }}"
when: terraform_version_installed is not defined or terraform_version_installed != terraform_version
diff --git a/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml b/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml
index b20182c9f3..28d1d8ea22 100644
--- a/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml
+++ b/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml
@@ -7,7 +7,7 @@
file:
path: "{{ terraform_project_dir }}/{{ item['name'] }}"
state: directory
- mode: 0755
+ mode: "0755"
loop: "{{ terraform_provider_versions }}"
loop_control:
index_var: provider_index
diff --git a/tests/integration/targets/terraform/vars/main.yml b/tests/integration/targets/terraform/vars/main.yml
index 1032adee4f..17032ea81f 100644
--- a/tests/integration/targets/terraform/vars/main.yml
+++ b/tests/integration/targets/terraform/vars/main.yml
@@ -24,11 +24,11 @@ terraform_provider_upgrade: true
# list of dicts containing Terraform providers that will be tested
# The null provider is a good candidate, as it's small and has no external dependencies
-terraform_provider_versions:
- - name: "null"
+terraform_provider_versions:
+ - name: "null"
source: "hashicorp/null"
version: ">=2.0.0, < 3.0.0"
- - name: "null"
+ - name: "null"
source: "hashicorp/null"
version: ">=3.0.0"
diff --git a/tests/integration/targets/test_a_module/runme.yml b/tests/integration/targets/test_a_module/runme.yml
index 4b7a5ec2ce..6ab0a2f7fa 100644
--- a/tests/integration/targets/test_a_module/runme.yml
+++ b/tests/integration/targets/test_a_module/runme.yml
@@ -39,4 +39,3 @@
- "'onyx_pfc_interface' is not community.general.a_module"
# Tombstoned module
- "'community.general.docker_image_facts' is not community.general.a_module"
- when: ansible_version.string is version('2.10.0', '>=')
diff --git a/tests/integration/targets/test_ansible_type/tasks/tasks.yml b/tests/integration/targets/test_ansible_type/tasks/tasks.yml
index eb1ba2ec66..443b36d36f 100644
--- a/tests/integration/targets/test_ansible_type/tasks/tasks.yml
+++ b/tests/integration/targets/test_ansible_type/tasks/tasks.yml
@@ -63,7 +63,7 @@
assert:
that: '"abc" is community.general.ansible_type(dtype)'
success_msg: '"abc" is {{ dtype }}'
- fail_msg: '"abc" is {{ result }}'
+ fail_msg: '"abc" is {{ result }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ "abc" | community.general.reveal_ansible_type }}'
@@ -73,7 +73,7 @@
assert:
that: '123 is community.general.ansible_type(dtype)'
success_msg: '123 is {{ dtype }}'
- fail_msg: '123 is {{ result }}'
+ fail_msg: '123 is {{ result }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ 123 | community.general.reveal_ansible_type }}'
@@ -83,7 +83,7 @@
assert:
that: '123.45 is community.general.ansible_type(dtype)'
success_msg: '123.45 is {{ dtype }}'
- fail_msg: '123.45 is {{ result }}'
+ fail_msg: '123.45 is {{ result }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ 123.45 | community.general.reveal_ansible_type }}'
@@ -93,7 +93,7 @@
assert:
that: 'true is community.general.ansible_type(dtype)'
success_msg: 'true is {{ dtype }}'
- fail_msg: 'true is {{ result }}'
+ fail_msg: 'true is {{ result }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ true | community.general.reveal_ansible_type }}'
@@ -103,7 +103,7 @@
assert:
that: '["a", "b", "c"] is community.general.ansible_type(dtype)'
success_msg: '["a", "b", "c"] is {{ dtype }}'
- fail_msg: '["a", "b", "c"] is {{ result }}'
+ fail_msg: '["a", "b", "c"] is {{ result }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}'
@@ -113,7 +113,7 @@
assert:
that: '[{"a": 1}, {"b": 2}] is community.general.ansible_type(dtype)'
success_msg: '[{"a": 1}, {"b": 2}] is {{ dtype }}'
- fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}'
+ fail_msg: '[{"a": 1}, {"b": 2}] is {{ result }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}'
@@ -123,7 +123,7 @@
assert:
that: '{"a": 1} is community.general.ansible_type(dtype)'
success_msg: '{"a": 1} is {{ dtype }}'
- fail_msg: '{"a": 1} is {{ result }}'
+ fail_msg: '{"a": 1} is {{ result }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ {"a": 1} | community.general.reveal_ansible_type }}'
@@ -133,7 +133,7 @@
assert:
that: '{"a": 1, "b": 2} is community.general.ansible_type(dtype)'
success_msg: '{"a": 1, "b": 2} is {{ dtype }}'
- fail_msg: '{"a": 1, "b": 2} is {{ result }}'
+ fail_msg: '{"a": 1, "b": 2} is {{ result }}'
quiet: '{{ quiet_test | default(true) | bool }}'
vars:
result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}'
@@ -184,7 +184,7 @@
_AnsibleTaggedStr: str
_AnsibleTaggedInt: int
_AnsibleTaggedFloat: float
- data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': True, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}}
+ data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': true, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}}
result: '{{ data | community.general.reveal_ansible_type(alias) }}'
dtype: dict[str, bool|dict|float|int|list|str]
@@ -200,7 +200,7 @@
_AnsibleTaggedStr: str
_AnsibleTaggedInt: int
_AnsibleTaggedFloat: float
- data: [1, 2, 1.1, 'abc', True, ['x', 'y', 'z'], {'x': 1, 'y': 2}]
+ data: [1, 2, 1.1, 'abc', true, ['x', 'y', 'z'], {'x': 1, 'y': 2}]
result: '{{ data | community.general.reveal_ansible_type(alias) }}'
dtype: list[bool|dict|float|int|list|str]
diff --git a/tests/integration/targets/timezone/tasks/test.yml b/tests/integration/targets/timezone/tasks/test.yml
index 975526800e..1a6787484d 100644
--- a/tests/integration/targets/timezone/tasks/test.yml
+++ b/tests/integration/targets/timezone/tasks/test.yml
@@ -431,7 +431,7 @@
- hwclock_set_local_deleted_adjtime_local.changed
- hwclock_set_local_deleted_adjtime_local.diff.after.hwclock == 'local'
- hwclock_set_local_deleted_adjtime_local.diff.before.hwclock == 'UTC'
-
+
##
## test set hwclock with conf file deleted
diff --git a/tests/integration/targets/ufw/aliases b/tests/integration/targets/ufw/aliases
index 3c6c855221..07227edc3f 100644
--- a/tests/integration/targets/ufw/aliases
+++ b/tests/integration/targets/ufw/aliases
@@ -15,6 +15,8 @@ skip/rhel9.2 # FIXME
skip/rhel9.3 # FIXME
skip/rhel9.4 # FIXME
skip/rhel9.5 # FIXME
+skip/rhel9.6 # FIXME
+skip/rhel10.0 # FIXME
skip/docker
needs/root
needs/target/setup_epel
diff --git a/tests/integration/targets/ufw/tasks/main.yml b/tests/integration/targets/ufw/tasks/main.yml
index 5fba2fa4d7..83e6a5138a 100644
--- a/tests/integration/targets/ufw/tasks/main.yml
+++ b/tests/integration/targets/ufw/tasks/main.yml
@@ -27,19 +27,19 @@
# Run the tests
- block:
- - include_tasks: run-test.yml
- with_fileglob:
- - "tests/*.yml"
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
become: true
# Cleanup
always:
- - pause:
- # ufw creates backups of the rule files with a timestamp; if reset is called
- # twice in a row fast enough (so that both timestamps are taken in the same second),
- # the second call will notice that the backup files are already there and fail.
- # Waiting one second fixes this problem.
- seconds: 1
- - name: Reset ufw to factory defaults and disable
- ufw:
- state: reset
+ - pause:
+ # ufw creates backups of the rule files with a timestamp; if reset is called
+ # twice in a row fast enough (so that both timestamps are taken in the same second),
+ # the second call will notice that the backup files are already there and fail.
+ # Waiting one second fixes this problem.
+ seconds: 1
+ - name: Reset ufw to factory defaults and disable
+ ufw:
+ state: reset
diff --git a/tests/integration/targets/ufw/tasks/tests/basic.yml b/tests/integration/targets/ufw/tasks/tests/basic.yml
index 8c179d7aed..3be130da78 100644
--- a/tests/integration/targets/ufw/tasks/tests/basic.yml
+++ b/tests/integration/targets/ufw/tasks/tests/basic.yml
@@ -27,10 +27,10 @@
register: enable_idem_check
- assert:
that:
- - enable_check is changed
- - enable is changed
- - enable_idem is not changed
- - enable_idem_check is not changed
+ - enable_check is changed
+ - enable is changed
+ - enable_idem is not changed
+ - enable_idem_check is not changed
# ############################################
- name: ipv4 allow (check mode)
@@ -61,10 +61,10 @@
register: ipv4_allow_idem_check
- assert:
that:
- - ipv4_allow_check is changed
- - ipv4_allow is changed
- - ipv4_allow_idem is not changed
- - ipv4_allow_idem_check is not changed
+ - ipv4_allow_check is changed
+ - ipv4_allow is changed
+ - ipv4_allow_idem is not changed
+ - ipv4_allow_idem_check is not changed
# ############################################
- name: delete ipv4 allow (check mode)
@@ -99,10 +99,10 @@
register: delete_ipv4_allow_idem_check
- assert:
that:
- - delete_ipv4_allow_check is changed
- - delete_ipv4_allow is changed
- - delete_ipv4_allow_idem is not changed
- - delete_ipv4_allow_idem_check is not changed
+ - delete_ipv4_allow_check is changed
+ - delete_ipv4_allow is changed
+ - delete_ipv4_allow_idem is not changed
+ - delete_ipv4_allow_idem_check is not changed
# ############################################
- name: ipv6 allow (check mode)
@@ -133,10 +133,10 @@
register: ipv6_allow_idem_check
- assert:
that:
- - ipv6_allow_check is changed
- - ipv6_allow is changed
- - ipv6_allow_idem is not changed
- - ipv6_allow_idem_check is not changed
+ - ipv6_allow_check is changed
+ - ipv6_allow is changed
+ - ipv6_allow_idem is not changed
+ - ipv6_allow_idem_check is not changed
# ############################################
- name: delete ipv6 allow (check mode)
@@ -171,10 +171,10 @@
register: delete_ipv6_allow_idem_check
- assert:
that:
- - delete_ipv6_allow_check is changed
- - delete_ipv6_allow is changed
- - delete_ipv6_allow_idem is not changed
- - delete_ipv6_allow_idem_check is not changed
+ - delete_ipv6_allow_check is changed
+ - delete_ipv6_allow is changed
+ - delete_ipv6_allow_idem is not changed
+ - delete_ipv6_allow_idem_check is not changed
# ############################################
@@ -206,10 +206,10 @@
register: ipv4_allow_idem_check
- assert:
that:
- - ipv4_allow_check is changed
- - ipv4_allow is changed
- - ipv4_allow_idem is not changed
- - ipv4_allow_idem_check is not changed
+ - ipv4_allow_check is changed
+ - ipv4_allow is changed
+ - ipv4_allow_idem is not changed
+ - ipv4_allow_idem_check is not changed
# ############################################
- name: delete ipv4 allow (check mode)
@@ -244,10 +244,10 @@
register: delete_ipv4_allow_idem_check
- assert:
that:
- - delete_ipv4_allow_check is changed
- - delete_ipv4_allow is changed
- - delete_ipv4_allow_idem is not changed
- - delete_ipv4_allow_idem_check is not changed
+ - delete_ipv4_allow_check is changed
+ - delete_ipv4_allow is changed
+ - delete_ipv4_allow_idem is not changed
+ - delete_ipv4_allow_idem_check is not changed
# ############################################
- name: ipv6 allow (check mode)
@@ -278,10 +278,10 @@
register: ipv6_allow_idem_check
- assert:
that:
- - ipv6_allow_check is changed
- - ipv6_allow is changed
- - ipv6_allow_idem is not changed
- - ipv6_allow_idem_check is not changed
+ - ipv6_allow_check is changed
+ - ipv6_allow is changed
+ - ipv6_allow_idem is not changed
+ - ipv6_allow_idem_check is not changed
# ############################################
- name: delete ipv6 allow (check mode)
@@ -316,10 +316,10 @@
register: delete_ipv6_allow_idem_check
- assert:
that:
- - delete_ipv6_allow_check is changed
- - delete_ipv6_allow is changed
- - delete_ipv6_allow_idem is not changed
- - delete_ipv6_allow_idem_check is not changed
+ - delete_ipv6_allow_check is changed
+ - delete_ipv6_allow is changed
+ - delete_ipv6_allow_idem is not changed
+ - delete_ipv6_allow_idem_check is not changed
# ############################################
- name: Reload ufw
@@ -333,8 +333,8 @@
register: reload_check
- assert:
that:
- - reload is changed
- - reload_check is changed
+ - reload is changed
+ - reload_check is changed
# ############################################
- name: Disable (check mode)
@@ -357,10 +357,10 @@
register: disable_idem_check
- assert:
that:
- - disable_check is changed
- - disable is changed
- - disable_idem is not changed
- - disable_idem_check is not changed
+ - disable_check is changed
+ - disable is changed
+ - disable_idem is not changed
+ - disable_idem_check is not changed
# ############################################
- name: Re-enable
@@ -400,7 +400,7 @@
register: reset_idem_check
- assert:
that:
- - reset_check is changed
- - reset is changed
- - reset_idem is changed
- - reset_idem_check is changed
+ - reset_check is changed
+ - reset is changed
+ - reset_idem is changed
+ - reset_idem_check is changed
diff --git a/tests/integration/targets/ufw/tasks/tests/global-state.yml b/tests/integration/targets/ufw/tasks/tests/global-state.yml
index f5f1007510..3913a552f3 100644
--- a/tests/integration/targets/ufw/tasks/tests/global-state.yml
+++ b/tests/integration/targets/ufw/tasks/tests/global-state.yml
@@ -52,14 +52,14 @@
LC_ALL: C
- assert:
that:
- - logging_check is changed
- - logging is changed
- - "ufw_logging.stdout == 'Logging: on (low)'"
- - logging_idem is not changed
- - logging_idem_check is not changed
- - "ufw_logging_change.stdout == 'Logging: on (full)'"
- - logging_change is changed
- - logging_change_check is changed
+ - logging_check is changed
+ - logging is changed
+ - "ufw_logging.stdout == 'Logging: on (low)'"
+ - logging_idem is not changed
+ - logging_idem_check is not changed
+ - "ufw_logging_change.stdout == 'Logging: on (full)'"
+ - logging_change is changed
+ - logging_change_check is changed
# ############################################
- name: Default (check mode)
@@ -138,17 +138,17 @@
register: default_change_implicit_idem
- assert:
that:
- - default_check is changed
- - default is changed
- - "'reject (incoming)' in ufw_defaults.stdout"
- - default_idem is not changed
- - default_idem_check is not changed
- - default_change_check is changed
- - default_change is changed
- - "'allow (incoming)' in ufw_defaults_change.stdout"
- - default_change_2 is changed
- - default_change_implicit_check is changed
- - default_change_implicit is changed
- - default_change_implicit_idem_check is not changed
- - default_change_implicit_idem is not changed
- - "'allow (incoming)' in ufw_defaults_change_implicit.stdout"
+ - default_check is changed
+ - default is changed
+ - "'reject (incoming)' in ufw_defaults.stdout"
+ - default_idem is not changed
+ - default_idem_check is not changed
+ - default_change_check is changed
+ - default_change is changed
+ - "'allow (incoming)' in ufw_defaults_change.stdout"
+ - default_change_2 is changed
+ - default_change_implicit_check is changed
+ - default_change_implicit is changed
+ - default_change_implicit_idem_check is not changed
+ - default_change_implicit_idem is not changed
+ - "'allow (incoming)' in ufw_defaults_change_implicit.stdout"
diff --git a/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml b/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml
index 67328a0e3f..975600036f 100644
--- a/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml
+++ b/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml
@@ -71,14 +71,14 @@
register: ufw_status
- assert:
that:
- - ufw_status.stdout_lines == expected_stdout
+ - ufw_status.stdout_lines == expected_stdout
vars:
expected_stdout:
- - "0.0.0.0 10"
- - "0.0.0.0 22"
- - "0.0.0.0 11"
- - "0.0.0.0 23"
- - ":: 110"
- - ":: 122"
- - ":: 111"
- - ":: 123"
+ - "0.0.0.0 10"
+ - "0.0.0.0 22"
+ - "0.0.0.0 11"
+ - "0.0.0.0 23"
+ - ":: 110"
+ - ":: 122"
+ - ":: 111"
+ - ":: 123"
diff --git a/tests/integration/targets/wakeonlan/tasks/main.yml b/tests/integration/targets/wakeonlan/tasks/main.yml
index 0597480318..22bb0706ef 100644
--- a/tests/integration/targets/wakeonlan/tasks/main.yml
+++ b/tests/integration/targets/wakeonlan/tasks/main.yml
@@ -28,8 +28,8 @@
- name: Check error message
assert:
that:
- - incorrect_mac_length is failed
- - incorrect_mac_length.msg is search('Incorrect MAC address length')
+ - incorrect_mac_length is failed
+ - incorrect_mac_length.msg is search('Incorrect MAC address length')
- name: Provide an incorrect MAC format
wakeonlan:
@@ -41,8 +41,8 @@
- name: Check error message
assert:
that:
- - incorrect_mac_format is failed
- - incorrect_mac_format.msg is search('Incorrect MAC address format')
+ - incorrect_mac_format is failed
+ - incorrect_mac_format.msg is search('Incorrect MAC address format')
- name: Cause a socket error
wakeonlan:
@@ -54,5 +54,5 @@
- name: Check error message
assert:
that:
- - incorrect_broadcast_address is failed
- - incorrect_broadcast_address.msg is search('not known|Name does not resolve')
+ - incorrect_broadcast_address is failed
+ - incorrect_broadcast_address.msg is search('not known|Name does not resolve')
diff --git a/tests/integration/targets/xattr/tasks/test.yml b/tests/integration/targets/xattr/tasks/test.yml
index 7fe852d77a..dfc4da60e4 100644
--- a/tests/integration/targets/xattr/tasks/test.yml
+++ b/tests/integration/targets/xattr/tasks/test.yml
@@ -23,11 +23,11 @@
- assert:
that:
- - "xattr_set_result.changed"
- - "xattr_get_all_result['xattr']['user.foo'] == 'bar'"
- - "not xattr_get_all_result.changed"
- - "xattr_get_specific_result['xattr']['user.foo'] == 'bar'"
- - "not xattr_get_specific_result.changed"
+ - "xattr_set_result.changed"
+ - "xattr_get_all_result['xattr']['user.foo'] == 'bar'"
+ - "not xattr_get_all_result.changed"
+ - "xattr_get_specific_result['xattr']['user.foo'] == 'bar'"
+ - "not xattr_get_specific_result.changed"
- name: Set attribute again
xattr:
@@ -39,7 +39,7 @@
- assert:
that:
- - "not xattr_set_again_result.changed"
+ - "not xattr_set_again_result.changed"
- name: Unset attribute
xattr:
@@ -55,9 +55,9 @@
- assert:
that:
- - "xattr_unset_result.changed"
- - "xattr_get_after_unset_result['xattr'] == {}"
- - "not xattr_get_after_unset_result.changed"
+ - "xattr_unset_result.changed"
+ - "xattr_get_after_unset_result['xattr'] == {}"
+ - "not xattr_get_after_unset_result.changed"
- name: Unset attribute again
xattr:
@@ -69,4 +69,4 @@
- assert:
that:
- - "not xattr_set_again_result.changed"
+ - "not xattr_set_again_result.changed"
diff --git a/tests/integration/targets/xfs_quota/tasks/gquota.yml b/tests/integration/targets/xfs_quota/tasks/gquota.yml
index caca1d341d..3fca599221 100644
--- a/tests/integration/targets/xfs_quota/tasks/gquota.yml
+++ b/tests/integration/targets/xfs_quota/tasks/gquota.yml
@@ -12,136 +12,136 @@
dev: '{{ remote_tmp_dir }}/img-gquota'
fstype: xfs
- block:
- - name: Mount filesystem
- become: true
- ansible.posix.mount:
- fstab: '{{ remote_tmp_dir }}/fstab'
- src: '{{ remote_tmp_dir }}/img-gquota'
- path: '{{ remote_tmp_dir }}/gquota'
- fstype: xfs
- opts: gquota
- state: mounted
- - name: Apply default group limits
- xfs_quota:
- bsoft: '{{ gquota_default_bsoft }}'
- bhard: '{{ gquota_default_bhard }}'
- isoft: '{{ gquota_default_isoft }}'
- ihard: '{{ gquota_default_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/gquota'
- rtbsoft: '{{ gquota_default_rtbsoft }}'
- rtbhard: '{{ gquota_default_rtbhard }}'
- type: group
- become: true
- register: test_gquota_default_before
- - name: Assert default group limits results
- assert:
- that:
- - test_gquota_default_before.changed
- - test_gquota_default_before.bsoft == gquota_default_bsoft|human_to_bytes
- - test_gquota_default_before.bhard == gquota_default_bhard|human_to_bytes
- - test_gquota_default_before.isoft == gquota_default_isoft
- - test_gquota_default_before.ihard == gquota_default_ihard
- - test_gquota_default_before.rtbsoft == gquota_default_rtbsoft|human_to_bytes
- - test_gquota_default_before.rtbhard == gquota_default_rtbhard|human_to_bytes
- - name: Apply group limits
- xfs_quota:
- bsoft: '{{ gquota_group_bsoft }}'
- bhard: '{{ gquota_group_bhard }}'
- isoft: '{{ gquota_group_isoft }}'
- ihard: '{{ gquota_group_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/gquota'
- name: xfsquotauser
- rtbsoft: '{{ gquota_group_rtbsoft }}'
- rtbhard: '{{ gquota_group_rtbhard }}'
- type: group
- become: true
- register: test_gquota_group_before
- - name: Assert group limits results for xfsquotauser
- assert:
- that:
- - test_gquota_group_before.changed
- - test_gquota_group_before.bsoft == gquota_group_bsoft|human_to_bytes
- - test_gquota_group_before.bhard == gquota_group_bhard|human_to_bytes
- - test_gquota_group_before.isoft == gquota_group_isoft
- - test_gquota_group_before.ihard == gquota_group_ihard
- - test_gquota_group_before.rtbsoft == gquota_group_rtbsoft|human_to_bytes
- - test_gquota_group_before.rtbhard == gquota_group_rtbhard|human_to_bytes
- - name: Re-apply default group limits
- xfs_quota:
- bsoft: '{{ gquota_default_bsoft }}'
- bhard: '{{ gquota_default_bhard }}'
- isoft: '{{ gquota_default_isoft }}'
- ihard: '{{ gquota_default_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/gquota'
- rtbsoft: '{{ gquota_default_rtbsoft }}'
- rtbhard: '{{ gquota_default_rtbhard }}'
- type: group
- become: true
- register: test_gquota_default_after
- - name: Assert default group limits results after re-apply
- assert:
- that:
- - not test_gquota_default_after.changed
- - name: Re-apply group limits
- xfs_quota:
- bsoft: '{{ gquota_group_bsoft }}'
- bhard: '{{ gquota_group_bhard }}'
- isoft: '{{ gquota_group_isoft }}'
- ihard: '{{ gquota_group_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/gquota'
- name: xfsquotauser
- rtbsoft: '{{ gquota_group_rtbsoft }}'
- rtbhard: '{{ gquota_group_rtbhard }}'
- type: group
- become: true
- register: test_gquota_group_after
- - name: Assert group limits results for xfsquotauser after re-apply
- assert:
- that:
- - not test_gquota_group_after.changed
- - name: Reset default group limits
- xfs_quota:
- mountpoint: '{{ remote_tmp_dir }}/gquota'
- state: absent
- type: group
- become: true
- register: test_reset_gquota_default
- - name: Assert reset of default group limits results
- assert:
- that:
- - test_reset_gquota_default.changed
- - test_reset_gquota_default.bsoft == 0
- - test_reset_gquota_default.bhard == 0
- - test_reset_gquota_default.isoft == 0
- - test_reset_gquota_default.ihard == 0
- - test_reset_gquota_default.rtbsoft == 0
- - test_reset_gquota_default.rtbhard == 0
- - name: Reset group limits for xfsquotauser
- xfs_quota:
- mountpoint: '{{ remote_tmp_dir }}/gquota'
- name: xfsquotauser
- state: absent
- type: group
- become: true
- register: test_reset_gquota_group
- - name: Assert reset of default group limits results
- assert:
- that:
- - test_reset_gquota_group.changed
- - test_reset_gquota_group.bsoft == 0
- - test_reset_gquota_group.bhard == 0
- - test_reset_gquota_group.isoft == 0
- - test_reset_gquota_group.ihard == 0
- - test_reset_gquota_group.rtbsoft == 0
- - test_reset_gquota_group.rtbhard == 0
+ - name: Mount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ src: '{{ remote_tmp_dir }}/img-gquota'
+ path: '{{ remote_tmp_dir }}/gquota'
+ fstype: xfs
+ opts: gquota
+ state: mounted
+ - name: Apply default group limits
+ xfs_quota:
+ bsoft: '{{ gquota_default_bsoft }}'
+ bhard: '{{ gquota_default_bhard }}'
+ isoft: '{{ gquota_default_isoft }}'
+ ihard: '{{ gquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ rtbsoft: '{{ gquota_default_rtbsoft }}'
+ rtbhard: '{{ gquota_default_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_default_before
+ - name: Assert default group limits results
+ assert:
+ that:
+ - test_gquota_default_before.changed
+ - test_gquota_default_before.bsoft == gquota_default_bsoft|human_to_bytes
+ - test_gquota_default_before.bhard == gquota_default_bhard|human_to_bytes
+ - test_gquota_default_before.isoft == gquota_default_isoft
+ - test_gquota_default_before.ihard == gquota_default_ihard
+ - test_gquota_default_before.rtbsoft == gquota_default_rtbsoft|human_to_bytes
+ - test_gquota_default_before.rtbhard == gquota_default_rtbhard|human_to_bytes
+ - name: Apply group limits
+ xfs_quota:
+ bsoft: '{{ gquota_group_bsoft }}'
+ bhard: '{{ gquota_group_bhard }}'
+ isoft: '{{ gquota_group_isoft }}'
+ ihard: '{{ gquota_group_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ name: xfsquotauser
+ rtbsoft: '{{ gquota_group_rtbsoft }}'
+ rtbhard: '{{ gquota_group_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_group_before
+ - name: Assert group limits results for xfsquotauser
+ assert:
+ that:
+ - test_gquota_group_before.changed
+ - test_gquota_group_before.bsoft == gquota_group_bsoft|human_to_bytes
+ - test_gquota_group_before.bhard == gquota_group_bhard|human_to_bytes
+ - test_gquota_group_before.isoft == gquota_group_isoft
+ - test_gquota_group_before.ihard == gquota_group_ihard
+ - test_gquota_group_before.rtbsoft == gquota_group_rtbsoft|human_to_bytes
+ - test_gquota_group_before.rtbhard == gquota_group_rtbhard|human_to_bytes
+ - name: Re-apply default group limits
+ xfs_quota:
+ bsoft: '{{ gquota_default_bsoft }}'
+ bhard: '{{ gquota_default_bhard }}'
+ isoft: '{{ gquota_default_isoft }}'
+ ihard: '{{ gquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ rtbsoft: '{{ gquota_default_rtbsoft }}'
+ rtbhard: '{{ gquota_default_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_default_after
+ - name: Assert default group limits results after re-apply
+ assert:
+ that:
+ - not test_gquota_default_after.changed
+ - name: Re-apply group limits
+ xfs_quota:
+ bsoft: '{{ gquota_group_bsoft }}'
+ bhard: '{{ gquota_group_bhard }}'
+ isoft: '{{ gquota_group_isoft }}'
+ ihard: '{{ gquota_group_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ name: xfsquotauser
+ rtbsoft: '{{ gquota_group_rtbsoft }}'
+ rtbhard: '{{ gquota_group_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_group_after
+ - name: Assert group limits results for xfsquotauser after re-apply
+ assert:
+ that:
+ - not test_gquota_group_after.changed
+ - name: Reset default group limits
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ state: absent
+ type: group
+ become: true
+ register: test_reset_gquota_default
+ - name: Assert reset of default group limits results
+ assert:
+ that:
+ - test_reset_gquota_default.changed
+ - test_reset_gquota_default.bsoft == 0
+ - test_reset_gquota_default.bhard == 0
+ - test_reset_gquota_default.isoft == 0
+ - test_reset_gquota_default.ihard == 0
+ - test_reset_gquota_default.rtbsoft == 0
+ - test_reset_gquota_default.rtbhard == 0
+ - name: Reset group limits for xfsquotauser
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ name: xfsquotauser
+ state: absent
+ type: group
+ become: true
+ register: test_reset_gquota_group
+ - name: Assert reset of group limits results for xfsquotauser
+ assert:
+ that:
+ - test_reset_gquota_group.changed
+ - test_reset_gquota_group.bsoft == 0
+ - test_reset_gquota_group.bhard == 0
+ - test_reset_gquota_group.isoft == 0
+ - test_reset_gquota_group.ihard == 0
+ - test_reset_gquota_group.rtbsoft == 0
+ - test_reset_gquota_group.rtbhard == 0
always:
- - name: Unmount filesystem
- become: true
- ansible.posix.mount:
- fstab: '{{ remote_tmp_dir }}/fstab'
- path: '{{ remote_tmp_dir }}/gquota'
- state: unmounted
- - name: Remove disk image
- file:
- path: '{{ remote_tmp_dir }}/img-gquota'
- state: absent
+ - name: Unmount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ path: '{{ remote_tmp_dir }}/gquota'
+ state: unmounted
+ - name: Remove disk image
+ file:
+ path: '{{ remote_tmp_dir }}/img-gquota'
+ state: absent
diff --git a/tests/integration/targets/xfs_quota/tasks/pquota.yml b/tests/integration/targets/xfs_quota/tasks/pquota.yml
index db364ffd5f..439452da24 100644
--- a/tests/integration/targets/xfs_quota/tasks/pquota.yml
+++ b/tests/integration/targets/xfs_quota/tasks/pquota.yml
@@ -17,8 +17,8 @@
state: touch
become: true
loop:
- - projid
- - projects
+ - projid
+ - projects
- name: Add test xfs quota project id
lineinfile:
path: /etc/projid
@@ -32,153 +32,153 @@
state: present
become: true
- block:
- - name: Mount filesystem
- become: true
- ansible.posix.mount:
- fstab: '{{ remote_tmp_dir }}/fstab'
- src: '{{ remote_tmp_dir }}/img-pquota'
- path: '{{ remote_tmp_dir }}/pquota'
- fstype: xfs
- opts: pquota
- state: mounted
- - name: Create test directory
- file:
- path: '{{ remote_tmp_dir }}/pquota/test'
- state: directory
- become: true
- - name: Apply default project limits
- xfs_quota:
- bsoft: '{{ pquota_default_bsoft }}'
- bhard: '{{ pquota_default_bhard }}'
- isoft: '{{ pquota_default_isoft }}'
- ihard: '{{ pquota_default_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/pquota'
- rtbsoft: '{{ pquota_default_rtbsoft }}'
- rtbhard: '{{ pquota_default_rtbhard }}'
- type: project
- become: true
- register: test_pquota_default_before
- - name: Assert default project limits results
- assert:
- that:
- - test_pquota_default_before.changed
- - test_pquota_default_before.bsoft == pquota_default_bsoft|human_to_bytes
- - test_pquota_default_before.bhard == pquota_default_bhard|human_to_bytes
- - test_pquota_default_before.isoft == pquota_default_isoft
- - test_pquota_default_before.ihard == pquota_default_ihard
- - test_pquota_default_before.rtbsoft == pquota_default_rtbsoft|human_to_bytes
- - test_pquota_default_before.rtbhard == pquota_default_rtbhard|human_to_bytes
- - name: Apply project limits
- xfs_quota:
- bsoft: '{{ pquota_project_bsoft }}'
- bhard: '{{ pquota_project_bhard }}'
- isoft: '{{ pquota_project_isoft }}'
- ihard: '{{ pquota_project_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/pquota'
- name: xft_quotaval
- rtbsoft: '{{ pquota_project_rtbsoft }}'
- rtbhard: '{{ pquota_project_rtbhard }}'
- type: project
- become: true
- register: test_pquota_project_before
- - name: Assert project limits results for xft_quotaval
- assert:
- that:
- - test_pquota_project_before.changed
- - test_pquota_project_before.bsoft == pquota_project_bsoft|human_to_bytes
- - test_pquota_project_before.bhard == pquota_project_bhard|human_to_bytes
- - test_pquota_project_before.isoft == pquota_project_isoft
- - test_pquota_project_before.ihard == pquota_project_ihard
- - test_pquota_project_before.rtbsoft == pquota_project_rtbsoft|human_to_bytes
- - test_pquota_project_before.rtbhard == pquota_project_rtbhard|human_to_bytes
- - name: Re-apply default project limits
- xfs_quota:
- bsoft: '{{ pquota_default_bsoft }}'
- bhard: '{{ pquota_default_bhard }}'
- isoft: '{{ pquota_default_isoft }}'
- ihard: '{{ pquota_default_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/pquota'
- rtbsoft: '{{ pquota_default_rtbsoft }}'
- rtbhard: '{{ pquota_default_rtbhard }}'
- type: project
- become: true
- register: test_pquota_default_after
- - name: Assert default project limits results after re-apply
- assert:
- that:
- - not test_pquota_default_after.changed
- - name: Re-apply project limits
- xfs_quota:
- bsoft: '{{ pquota_project_bsoft }}'
- bhard: '{{ pquota_project_bhard }}'
- isoft: '{{ pquota_project_isoft }}'
- ihard: '{{ pquota_project_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/pquota'
- name: xft_quotaval
- rtbsoft: '{{ pquota_project_rtbsoft }}'
- rtbhard: '{{ pquota_project_rtbhard }}'
- type: project
- become: true
- register: test_pquota_project_after
- - name: Assert project limits results for xft_quotaval after re-apply
- assert:
- that:
- - test_pquota_project_after is not changed
- - name: Reset default project limits
- xfs_quota:
- mountpoint: '{{ remote_tmp_dir }}/pquota'
- state: absent
- type: project
- become: true
- register: test_reset_pquota_default
- - name: Assert reset of default projecy limits results
- assert:
- that:
- - test_reset_pquota_default.changed
- - test_reset_pquota_default.bsoft == 0
- - test_reset_pquota_default.bhard == 0
- - test_reset_pquota_default.isoft == 0
- - test_reset_pquota_default.ihard == 0
- - test_reset_pquota_default.rtbsoft == 0
- - test_reset_pquota_default.rtbhard == 0
- - name: Reset project limits for xft_quotaval
- xfs_quota:
- mountpoint: '{{ remote_tmp_dir }}/pquota'
- name: xft_quotaval
- state: absent
- type: project
- become: true
- register: test_reset_pquota_project
- - name: Assert reset of project limits results for xft_quotaval
- assert:
- that:
- - test_reset_pquota_project.changed
- - test_reset_pquota_project.bsoft == 0
- - test_reset_pquota_project.bhard == 0
- - test_reset_pquota_project.isoft == 0
- - test_reset_pquota_project.ihard == 0
- - test_reset_pquota_project.rtbsoft == 0
- - test_reset_pquota_project.rtbhard == 0
+ - name: Mount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ src: '{{ remote_tmp_dir }}/img-pquota'
+ path: '{{ remote_tmp_dir }}/pquota'
+ fstype: xfs
+ opts: pquota
+ state: mounted
+ - name: Create test directory
+ file:
+ path: '{{ remote_tmp_dir }}/pquota/test'
+ state: directory
+ become: true
+ - name: Apply default project limits
+ xfs_quota:
+ bsoft: '{{ pquota_default_bsoft }}'
+ bhard: '{{ pquota_default_bhard }}'
+ isoft: '{{ pquota_default_isoft }}'
+ ihard: '{{ pquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ rtbsoft: '{{ pquota_default_rtbsoft }}'
+ rtbhard: '{{ pquota_default_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_default_before
+ - name: Assert default project limits results
+ assert:
+ that:
+ - test_pquota_default_before.changed
+ - test_pquota_default_before.bsoft == pquota_default_bsoft|human_to_bytes
+ - test_pquota_default_before.bhard == pquota_default_bhard|human_to_bytes
+ - test_pquota_default_before.isoft == pquota_default_isoft
+ - test_pquota_default_before.ihard == pquota_default_ihard
+ - test_pquota_default_before.rtbsoft == pquota_default_rtbsoft|human_to_bytes
+ - test_pquota_default_before.rtbhard == pquota_default_rtbhard|human_to_bytes
+ - name: Apply project limits
+ xfs_quota:
+ bsoft: '{{ pquota_project_bsoft }}'
+ bhard: '{{ pquota_project_bhard }}'
+ isoft: '{{ pquota_project_isoft }}'
+ ihard: '{{ pquota_project_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ name: xft_quotaval
+ rtbsoft: '{{ pquota_project_rtbsoft }}'
+ rtbhard: '{{ pquota_project_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_project_before
+ - name: Assert project limits results for xft_quotaval
+ assert:
+ that:
+ - test_pquota_project_before.changed
+ - test_pquota_project_before.bsoft == pquota_project_bsoft|human_to_bytes
+ - test_pquota_project_before.bhard == pquota_project_bhard|human_to_bytes
+ - test_pquota_project_before.isoft == pquota_project_isoft
+ - test_pquota_project_before.ihard == pquota_project_ihard
+ - test_pquota_project_before.rtbsoft == pquota_project_rtbsoft|human_to_bytes
+ - test_pquota_project_before.rtbhard == pquota_project_rtbhard|human_to_bytes
+ - name: Re-apply default project limits
+ xfs_quota:
+ bsoft: '{{ pquota_default_bsoft }}'
+ bhard: '{{ pquota_default_bhard }}'
+ isoft: '{{ pquota_default_isoft }}'
+ ihard: '{{ pquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ rtbsoft: '{{ pquota_default_rtbsoft }}'
+ rtbhard: '{{ pquota_default_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_default_after
+ - name: Assert default project limits results after re-apply
+ assert:
+ that:
+ - not test_pquota_default_after.changed
+ - name: Re-apply project limits
+ xfs_quota:
+ bsoft: '{{ pquota_project_bsoft }}'
+ bhard: '{{ pquota_project_bhard }}'
+ isoft: '{{ pquota_project_isoft }}'
+ ihard: '{{ pquota_project_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ name: xft_quotaval
+ rtbsoft: '{{ pquota_project_rtbsoft }}'
+ rtbhard: '{{ pquota_project_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_project_after
+ - name: Assert project limits results for xft_quotaval after re-apply
+ assert:
+ that:
+ - test_pquota_project_after is not changed
+ - name: Reset default project limits
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ state: absent
+ type: project
+ become: true
+ register: test_reset_pquota_default
+ - name: Assert reset of default project limits results
+ assert:
+ that:
+ - test_reset_pquota_default.changed
+ - test_reset_pquota_default.bsoft == 0
+ - test_reset_pquota_default.bhard == 0
+ - test_reset_pquota_default.isoft == 0
+ - test_reset_pquota_default.ihard == 0
+ - test_reset_pquota_default.rtbsoft == 0
+ - test_reset_pquota_default.rtbhard == 0
+ - name: Reset project limits for xft_quotaval
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ name: xft_quotaval
+ state: absent
+ type: project
+ become: true
+ register: test_reset_pquota_project
+ - name: Assert reset of project limits results for xft_quotaval
+ assert:
+ that:
+ - test_reset_pquota_project.changed
+ - test_reset_pquota_project.bsoft == 0
+ - test_reset_pquota_project.bhard == 0
+ - test_reset_pquota_project.isoft == 0
+ - test_reset_pquota_project.ihard == 0
+ - test_reset_pquota_project.rtbsoft == 0
+ - test_reset_pquota_project.rtbhard == 0
always:
- - name: Unmount filesystem
- become: true
- ansible.posix.mount:
- fstab: '{{ remote_tmp_dir }}/fstab'
- path: '{{ remote_tmp_dir }}/pquota'
- state: unmounted
- - name: Remove disk image
- file:
- path: '{{ remote_tmp_dir }}/img-pquota'
- state: absent
- - name: Remove xfs quota project id
- lineinfile:
- path: /etc/projid
- regexp: ^xft_quotaval:99999$
- state: absent
- become: true
- - name: Remove xfs quota project path
- lineinfile:
- path: /etc/projects
- regexp: ^99999:.*$
- state: absent
- become: true
+ - name: Unmount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ path: '{{ remote_tmp_dir }}/pquota'
+ state: unmounted
+ - name: Remove disk image
+ file:
+ path: '{{ remote_tmp_dir }}/img-pquota'
+ state: absent
+ - name: Remove xfs quota project id
+ lineinfile:
+ path: /etc/projid
+ regexp: ^xft_quotaval:99999$
+ state: absent
+ become: true
+ - name: Remove xfs quota project path
+ lineinfile:
+ path: /etc/projects
+ regexp: ^99999:.*$
+ state: absent
+ become: true
diff --git a/tests/integration/targets/xfs_quota/tasks/uquota.yml b/tests/integration/targets/xfs_quota/tasks/uquota.yml
index 36a7eff766..0fcc0b30e0 100644
--- a/tests/integration/targets/xfs_quota/tasks/uquota.yml
+++ b/tests/integration/targets/xfs_quota/tasks/uquota.yml
@@ -12,136 +12,136 @@
dev: '{{ remote_tmp_dir }}/img-uquota'
fstype: xfs
- block:
- - name: Mount filesystem
- become: true
- ansible.posix.mount:
- fstab: '{{ remote_tmp_dir }}/fstab'
- src: '{{ remote_tmp_dir }}/img-uquota'
- path: '{{ remote_tmp_dir }}/uquota'
- fstype: xfs
- opts: uquota
- state: mounted
- - name: Apply default user limits
- xfs_quota:
- bsoft: '{{ uquota_default_bsoft }}'
- bhard: '{{ uquota_default_bhard }}'
- isoft: '{{ uquota_default_isoft }}'
- ihard: '{{ uquota_default_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/uquota'
- rtbsoft: '{{ uquota_default_rtbsoft }}'
- rtbhard: '{{ uquota_default_rtbhard }}'
- type: user
- become: true
- register: test_uquota_default_before
- - name: Assert default user limits results
- assert:
- that:
- - test_uquota_default_before.changed
- - test_uquota_default_before.bsoft == uquota_default_bsoft|human_to_bytes
- - test_uquota_default_before.bhard == uquota_default_bhard|human_to_bytes
- - test_uquota_default_before.isoft == uquota_default_isoft
- - test_uquota_default_before.ihard == uquota_default_ihard
- - test_uquota_default_before.rtbsoft == uquota_default_rtbsoft|human_to_bytes
- - test_uquota_default_before.rtbhard == uquota_default_rtbhard|human_to_bytes
- - name: Apply user limits
- xfs_quota:
- bsoft: '{{ uquota_user_bsoft }}'
- bhard: '{{ uquota_user_bhard }}'
- isoft: '{{ uquota_user_isoft }}'
- ihard: '{{ uquota_user_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/uquota'
- name: xfsquotauser
- rtbsoft: '{{ uquota_user_rtbsoft }}'
- rtbhard: '{{ uquota_user_rtbhard }}'
- type: user
- become: true
- register: test_uquota_user_before
- - name: Assert user limits results
- assert:
- that:
- - test_uquota_user_before.changed
- - test_uquota_user_before.bsoft == uquota_user_bsoft|human_to_bytes
- - test_uquota_user_before.bhard == uquota_user_bhard|human_to_bytes
- - test_uquota_user_before.isoft == uquota_user_isoft
- - test_uquota_user_before.ihard == uquota_user_ihard
- - test_uquota_user_before.rtbsoft == uquota_user_rtbsoft|human_to_bytes
- - test_uquota_user_before.rtbhard == uquota_user_rtbhard|human_to_bytes
- - name: Re-apply default user limits
- xfs_quota:
- bsoft: '{{ uquota_default_bsoft }}'
- bhard: '{{ uquota_default_bhard }}'
- isoft: '{{ uquota_default_isoft }}'
- ihard: '{{ uquota_default_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/uquota'
- rtbsoft: '{{ uquota_default_rtbsoft }}'
- rtbhard: '{{ uquota_default_rtbhard }}'
- type: user
- become: true
- register: test_uquota_default_after
- - name: Assert default user limits results after re-apply
- assert:
- that:
- - not test_uquota_default_after.changed
- - name: Re-apply user limits
- xfs_quota:
- bsoft: '{{ uquota_user_bsoft }}'
- bhard: '{{ uquota_user_bhard }}'
- isoft: '{{ uquota_user_isoft }}'
- ihard: '{{ uquota_user_ihard }}'
- mountpoint: '{{ remote_tmp_dir }}/uquota'
- name: xfsquotauser
- rtbsoft: '{{ uquota_user_rtbsoft }}'
- rtbhard: '{{ uquota_user_rtbhard }}'
- type: user
- become: true
- register: test_uquota_user_after
- - name: Assert user limits results for xfsquotauser after re-apply
- assert:
- that:
- - not test_uquota_user_after.changed
- - name: Reset default user limits
- xfs_quota:
- mountpoint: '{{ remote_tmp_dir }}/uquota'
- state: absent
- type: user
- become: true
- register: test_reset_uquota_default
- - name: Assert reset of default user limits results
- assert:
- that:
- - test_reset_uquota_default.changed
- - test_reset_uquota_default.bsoft == 0
- - test_reset_uquota_default.bhard == 0
- - test_reset_uquota_default.isoft == 0
- - test_reset_uquota_default.ihard == 0
- - test_reset_uquota_default.rtbsoft == 0
- - test_reset_uquota_default.rtbhard == 0
- - name: Reset user limits for xfsquotauser
- xfs_quota:
- mountpoint: '{{ remote_tmp_dir }}/uquota'
- name: xfsquotauser
- state: absent
- type: user
- become: true
- register: test_reset_uquota_user
- - name: Assert reset of default user limits results
- assert:
- that:
- - test_reset_uquota_user.changed
- - test_reset_uquota_user.bsoft == 0
- - test_reset_uquota_user.bhard == 0
- - test_reset_uquota_user.isoft == 0
- - test_reset_uquota_user.ihard == 0
- - test_reset_uquota_user.rtbsoft == 0
- - test_reset_uquota_user.rtbhard == 0
+ - name: Mount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ src: '{{ remote_tmp_dir }}/img-uquota'
+ path: '{{ remote_tmp_dir }}/uquota'
+ fstype: xfs
+ opts: uquota
+ state: mounted
+ - name: Apply default user limits
+ xfs_quota:
+ bsoft: '{{ uquota_default_bsoft }}'
+ bhard: '{{ uquota_default_bhard }}'
+ isoft: '{{ uquota_default_isoft }}'
+ ihard: '{{ uquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ rtbsoft: '{{ uquota_default_rtbsoft }}'
+ rtbhard: '{{ uquota_default_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_default_before
+ - name: Assert default user limits results
+ assert:
+ that:
+ - test_uquota_default_before.changed
+ - test_uquota_default_before.bsoft == uquota_default_bsoft|human_to_bytes
+ - test_uquota_default_before.bhard == uquota_default_bhard|human_to_bytes
+ - test_uquota_default_before.isoft == uquota_default_isoft
+ - test_uquota_default_before.ihard == uquota_default_ihard
+ - test_uquota_default_before.rtbsoft == uquota_default_rtbsoft|human_to_bytes
+ - test_uquota_default_before.rtbhard == uquota_default_rtbhard|human_to_bytes
+ - name: Apply user limits
+ xfs_quota:
+ bsoft: '{{ uquota_user_bsoft }}'
+ bhard: '{{ uquota_user_bhard }}'
+ isoft: '{{ uquota_user_isoft }}'
+ ihard: '{{ uquota_user_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ name: xfsquotauser
+ rtbsoft: '{{ uquota_user_rtbsoft }}'
+ rtbhard: '{{ uquota_user_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_user_before
+ - name: Assert user limits results
+ assert:
+ that:
+ - test_uquota_user_before.changed
+ - test_uquota_user_before.bsoft == uquota_user_bsoft|human_to_bytes
+ - test_uquota_user_before.bhard == uquota_user_bhard|human_to_bytes
+ - test_uquota_user_before.isoft == uquota_user_isoft
+ - test_uquota_user_before.ihard == uquota_user_ihard
+ - test_uquota_user_before.rtbsoft == uquota_user_rtbsoft|human_to_bytes
+ - test_uquota_user_before.rtbhard == uquota_user_rtbhard|human_to_bytes
+ - name: Re-apply default user limits
+ xfs_quota:
+ bsoft: '{{ uquota_default_bsoft }}'
+ bhard: '{{ uquota_default_bhard }}'
+ isoft: '{{ uquota_default_isoft }}'
+ ihard: '{{ uquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ rtbsoft: '{{ uquota_default_rtbsoft }}'
+ rtbhard: '{{ uquota_default_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_default_after
+ - name: Assert default user limits results after re-apply
+ assert:
+ that:
+ - not test_uquota_default_after.changed
+ - name: Re-apply user limits
+ xfs_quota:
+ bsoft: '{{ uquota_user_bsoft }}'
+ bhard: '{{ uquota_user_bhard }}'
+ isoft: '{{ uquota_user_isoft }}'
+ ihard: '{{ uquota_user_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ name: xfsquotauser
+ rtbsoft: '{{ uquota_user_rtbsoft }}'
+ rtbhard: '{{ uquota_user_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_user_after
+ - name: Assert user limits results for xfsquotauser after re-apply
+ assert:
+ that:
+ - not test_uquota_user_after.changed
+ - name: Reset default user limits
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ state: absent
+ type: user
+ become: true
+ register: test_reset_uquota_default
+ - name: Assert reset of default user limits results
+ assert:
+ that:
+ - test_reset_uquota_default.changed
+ - test_reset_uquota_default.bsoft == 0
+ - test_reset_uquota_default.bhard == 0
+ - test_reset_uquota_default.isoft == 0
+ - test_reset_uquota_default.ihard == 0
+ - test_reset_uquota_default.rtbsoft == 0
+ - test_reset_uquota_default.rtbhard == 0
+ - name: Reset user limits for xfsquotauser
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ name: xfsquotauser
+ state: absent
+ type: user
+ become: true
+ register: test_reset_uquota_user
+ - name: Assert reset of user limits results for xfsquotauser
+ assert:
+ that:
+ - test_reset_uquota_user.changed
+ - test_reset_uquota_user.bsoft == 0
+ - test_reset_uquota_user.bhard == 0
+ - test_reset_uquota_user.isoft == 0
+ - test_reset_uquota_user.ihard == 0
+ - test_reset_uquota_user.rtbsoft == 0
+ - test_reset_uquota_user.rtbhard == 0
always:
- - name: Unmount filesystem
- become: true
- ansible.posix.mount:
- fstab: '{{ remote_tmp_dir }}/fstab'
- path: '{{ remote_tmp_dir }}/uquota'
- state: unmounted
- - name: Remove disk image
- file:
- path: '{{ remote_tmp_dir }}/img-uquota'
- state: absent
+ - name: Unmount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ path: '{{ remote_tmp_dir }}/uquota'
+ state: unmounted
+ - name: Remove disk image
+ file:
+ path: '{{ remote_tmp_dir }}/img-uquota'
+ state: absent
diff --git a/tests/integration/targets/xml/results/test-set-children-elements-value.xml b/tests/integration/targets/xml/results/test-set-children-elements-value.xml
new file mode 100644
index 0000000000..53e23c80d6
--- /dev/null
+++ b/tests/integration/targets/xml/results/test-set-children-elements-value.xml
@@ -0,0 +1,11 @@
+
+
+ Tasty Beverage Co.
+
+ 25
+ 10
+
+
+ http://tastybeverageco.com
+
+
diff --git a/tests/sanity/ignore-2.15.txt.license b/tests/integration/targets/xml/results/test-set-children-elements-value.xml.license
similarity index 100%
rename from tests/sanity/ignore-2.15.txt.license
rename to tests/integration/targets/xml/results/test-set-children-elements-value.xml.license
diff --git a/tests/integration/targets/xml/tasks/main.yml b/tests/integration/targets/xml/tasks/main.yml
index 8235f1a6b6..5c2c01ed53 100644
--- a/tests/integration/targets/xml/tasks/main.yml
+++ b/tests/integration/targets/xml/tasks/main.yml
@@ -47,40 +47,41 @@
when: lxml_xpath_attribute_result_attrname
block:
- - include_tasks: test-add-children-elements.yml
- - include_tasks: test-add-children-from-groupvars.yml
- - include_tasks: test-add-children-insertafter.yml
- - include_tasks: test-add-children-insertbefore.yml
- - include_tasks: test-add-children-with-attributes.yml
- - include_tasks: test-add-element-implicitly.yml
- - include_tasks: test-count.yml
- - include_tasks: test-mutually-exclusive-attributes.yml
- - include_tasks: test-remove-attribute.yml
- - include_tasks: test-remove-attribute-nochange.yml
- - include_tasks: test-remove-element.yml
- - include_tasks: test-remove-element-nochange.yml
- - include_tasks: test-set-attribute-value.yml
- - include_tasks: test-set-children-elements.yml
- - include_tasks: test-set-children-elements-level.yml
- - include_tasks: test-set-element-value.yml
- - include_tasks: test-set-element-value-empty.yml
- - include_tasks: test-pretty-print.yml
- - include_tasks: test-pretty-print-only.yml
- - include_tasks: test-add-namespaced-children-elements.yml
- - include_tasks: test-remove-namespaced-attribute.yml
- - include_tasks: test-remove-namespaced-attribute-nochange.yml
- - include_tasks: test-set-namespaced-attribute-value.yml
- - include_tasks: test-set-namespaced-element-value.yml
- - include_tasks: test-set-namespaced-children-elements.yml
- - include_tasks: test-get-element-content.yml
- - include_tasks: test-xmlstring.yml
- - include_tasks: test-children-elements-xml.yml
+ - include_tasks: test-add-children-elements.yml
+ - include_tasks: test-add-children-from-groupvars.yml
+ - include_tasks: test-add-children-insertafter.yml
+ - include_tasks: test-add-children-insertbefore.yml
+ - include_tasks: test-add-children-with-attributes.yml
+ - include_tasks: test-add-element-implicitly.yml
+ - include_tasks: test-count.yml
+ - include_tasks: test-mutually-exclusive-attributes.yml
+ - include_tasks: test-remove-attribute.yml
+ - include_tasks: test-remove-attribute-nochange.yml
+ - include_tasks: test-remove-element.yml
+ - include_tasks: test-remove-element-nochange.yml
+ - include_tasks: test-set-attribute-value.yml
+ - include_tasks: test-set-children-elements.yml
+ - include_tasks: test-set-children-elements-level.yml
+ - include_tasks: test-set-children-elements-value.yml
+ - include_tasks: test-set-element-value.yml
+ - include_tasks: test-set-element-value-empty.yml
+ - include_tasks: test-pretty-print.yml
+ - include_tasks: test-pretty-print-only.yml
+ - include_tasks: test-add-namespaced-children-elements.yml
+ - include_tasks: test-remove-namespaced-attribute.yml
+ - include_tasks: test-remove-namespaced-attribute-nochange.yml
+ - include_tasks: test-set-namespaced-attribute-value.yml
+ - include_tasks: test-set-namespaced-element-value.yml
+ - include_tasks: test-set-namespaced-children-elements.yml
+ - include_tasks: test-get-element-content.yml
+ - include_tasks: test-xmlstring.yml
+ - include_tasks: test-children-elements-xml.yml
- # Unicode tests
- - include_tasks: test-add-children-elements-unicode.yml
- - include_tasks: test-add-children-with-attributes-unicode.yml
- - include_tasks: test-set-attribute-value-unicode.yml
- - include_tasks: test-count-unicode.yml
- - include_tasks: test-get-element-content.yml
- - include_tasks: test-set-children-elements-unicode.yml
- - include_tasks: test-set-element-value-unicode.yml
+ # Unicode tests
+ - include_tasks: test-add-children-elements-unicode.yml
+ - include_tasks: test-add-children-with-attributes-unicode.yml
+ - include_tasks: test-set-attribute-value-unicode.yml
+ - include_tasks: test-count-unicode.yml
+ - include_tasks: test-get-element-content.yml
+ - include_tasks: test-set-children-elements-unicode.yml
+ - include_tasks: test-set-element-value-unicode.yml
diff --git a/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml b/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml
index e15ac5fd92..0b79cdafa5 100644
--- a/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml
+++ b/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml
@@ -3,34 +3,34 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Add child element
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- add_children:
+- name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
- beer: Окское
- register: add_children_elements_unicode
+ register: add_children_elements_unicode
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-add-children-elements-unicode.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- add_children_elements_unicode is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-add-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-add-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-add-children-elements.yml b/tests/integration/targets/xml/tasks/test-add-children-elements.yml
index 29467f6d6f..68e295cccf 100644
--- a/tests/integration/targets/xml/tasks/test-add-children-elements.yml
+++ b/tests/integration/targets/xml/tasks/test-add-children-elements.yml
@@ -3,34 +3,34 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Add child element
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- add_children:
+- name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
- beer: Old Rasputin
- register: add_children_elements
+ register: add_children_elements
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-add-children-elements.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- add_children_elements is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml b/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml
index 2b232b6d0d..d65abd152a 100644
--- a/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml
+++ b/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml
@@ -3,33 +3,33 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Add child element
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- add_children: '{{ bad_beers }}'
- register: add_children_from_groupvars
+- name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children: '{{ bad_beers }}'
+ register: add_children_from_groupvars
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-add-children-from-groupvars.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-add-children-from-groupvars.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- add_children_from_groupvars is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-add-children-from-groupvars.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-add-children-from-groupvars.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml b/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml
index 7795c89663..4c581556e8 100644
--- a/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml
+++ b/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml
@@ -3,34 +3,34 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Add child element
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
- insertafter: true
- add_children:
+- name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
+ insertafter: true
+ add_children:
- beer: Old Rasputin
- beer: Old Motor Oil
- beer: Old Curmudgeon
- pretty_print: true
- register: add_children_insertafter
+ pretty_print: true
+ register: add_children_insertafter
- - name: Compare to expected result
- copy:
- src: results/test-add-children-insertafter.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-add-children-insertafter.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- add_children_insertafter is changed
- comparison is not changed # identical
diff --git a/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml b/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml
index b14c5e06fc..6b7b325e8f 100644
--- a/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml
+++ b/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml
@@ -3,34 +3,34 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Add child element
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
- insertbefore: true
- add_children:
+- name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
+ insertbefore: true
+ add_children:
- beer: Old Rasputin
- beer: Old Motor Oil
- beer: Old Curmudgeon
- pretty_print: true
- register: add_children_insertbefore
+ pretty_print: true
+ register: add_children_insertbefore
- - name: Compare to expected result
- copy:
- src: results/test-add-children-insertbefore.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-add-children-insertbefore.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- add_children_insertbefore is changed
- comparison is not changed # identical
diff --git a/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml b/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml
index 07905aa15c..b09a117e5b 100644
--- a/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml
+++ b/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml
@@ -3,36 +3,36 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Add child element
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- add_children:
+- name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
- beer:
name: Окское
type: экстра
- register: add_children_with_attributes_unicode
+ register: add_children_with_attributes_unicode
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-add-children-with-attributes-unicode.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-add-children-with-attributes-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- add_children_with_attributes_unicode is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-add-children-with-attributes-unicode.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-add-children-with-attributes-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml b/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml
index fede24395f..3cc1b36875 100644
--- a/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml
+++ b/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml
@@ -3,40 +3,40 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Add child element
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- add_children:
+- name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
- beer:
name: Ansible Brew
type: light
- register: add_children_with_attributes
+ register: add_children_with_attributes
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-add-children-with-attributes.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-add-children-with-attributes.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- # NOTE: This test may fail if lxml does not support predictable element attribute order
- # So we filter the failure out for these platforms (e.g. CentOS 6)
- # The module still works fine, we simply are not comparing as smart as we should.
- - name: Test expected result
- assert:
- that:
+# NOTE: This test may fail if lxml does not support predictable element attribute order
+# So we filter the failure out for these platforms (e.g. CentOS 6)
+# The module still works fine, we simply are not comparing as smart as we should.
+- name: Test expected result
+ assert:
+ that:
- add_children_with_attributes is changed
- comparison is not changed # identical
- when: lxml_predictable_attribute_order
- #command: diff -u {{ role_path }}/results/test-add-children-with-attributes.xml /tmp/ansible-xml-beers.xml
+ when: lxml_predictable_attribute_order
+ # command: diff -u {{ role_path }}/results/test-add-children-with-attributes.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml b/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml
index b1718e452e..65cc19ca12 100644
--- a/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml
+++ b/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml
@@ -44,8 +44,8 @@
- name: Add an attribute with a value
xml:
- file: /tmp/ansible-xml-beers-implicit.xml
- xpath: /business/owner/@dob='1976-04-12'
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/@dob='1976-04-12'
- name: Add an element with a value, alternate syntax
xml:
@@ -112,8 +112,8 @@
- name: Test expected result
assert:
that:
- - comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-add-element-implicitly.xml /tmp/ansible-xml-beers-implicit.xml
+ - comparison is not changed # identical
+ # command: diff -u {{ role_path }}/results/test-add-element-implicitly.xml /tmp/ansible-xml-beers-implicit.xml
# Now we repeat the same, just to ensure proper use of namespaces
@@ -205,7 +205,7 @@
value: xml tag with no special characters
pretty_print: true
namespaces:
- a: http://example.com/some/namespace
+ a: http://example.com/some/namespace
- name: Add an element with dash
@@ -215,7 +215,7 @@
value: xml tag with dashes
pretty_print: true
namespaces:
- a: http://example.com/some/namespace
+ a: http://example.com/some/namespace
- name: Add an element with dot
xml:
@@ -224,7 +224,7 @@
value: xml tag with dashes and dots
pretty_print: true
namespaces:
- a: http://example.com/some/namespace
+ a: http://example.com/some/namespace
- name: Add an element with underscore
xml:
@@ -233,7 +233,7 @@
value: xml tag with dashes, dots and underscores
pretty_print: true
namespaces:
- a: http://example.com/some/namespace
+ a: http://example.com/some/namespace
- name: Pretty Print this!
xml:
diff --git a/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml b/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml
index 2a9daab787..884fc4a917 100644
--- a/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml
+++ b/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml
@@ -3,37 +3,37 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-namespaced-beers.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
- - name: Add namespaced child element
- xml:
- path: /tmp/ansible-xml-namespaced-beers.xml
- xpath: /bus:business/ber:beers
- namespaces:
- bus: http://test.business
- ber: http://test.beers
- add_children:
+- name: Add namespaced child element
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ add_children:
- beer: Old Rasputin
- register: add_namespaced_children_elements
+ register: add_namespaced_children_elements
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-add-namespaced-children-elements.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-add-namespaced-children-elements.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- add_namespaced_children_elements is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-add-namespaced-children-elements.xml /tmp/ansible-xml-namespaced-beers.xml
+ # command: diff -u {{ role_path }}/results/test-add-namespaced-children-elements.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-children-elements-xml.yml b/tests/integration/targets/xml/tasks/test-children-elements-xml.yml
index 1c8c2b804d..630699dd56 100644
--- a/tests/integration/targets/xml/tasks/test-children-elements-xml.yml
+++ b/tests/integration/targets/xml/tasks/test-children-elements-xml.yml
@@ -3,35 +3,35 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Add child element with xml format
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- input_type: xml
- add_children:
+- name: Add child element with xml format
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ input_type: xml
+ add_children:
- 'Old Rasputin'
- register: children_elements
+ register: children_elements
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-add-children-elements.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- children_elements is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-count-unicode.yml b/tests/integration/targets/xml/tasks/test-count-unicode.yml
index 118e2986db..e54e466902 100644
--- a/tests/integration/targets/xml/tasks/test-count-unicode.yml
+++ b/tests/integration/targets/xml/tasks/test-count-unicode.yml
@@ -3,21 +3,21 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers-unicode.xml
- dest: /tmp/ansible-xml-beers-unicode.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers-unicode.xml
+ dest: /tmp/ansible-xml-beers-unicode.xml
- - name: Count child element
- xml:
- path: /tmp/ansible-xml-beers-unicode.xml
- xpath: /business/beers/beer
- count: true
- register: beers
+- name: Count child element
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/beers/beer
+ count: true
+ register: beers
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- beers is not changed
- beers.count == 2
diff --git a/tests/integration/targets/xml/tasks/test-count.yml b/tests/integration/targets/xml/tasks/test-count.yml
index 79be9402fe..1e4b043a02 100644
--- a/tests/integration/targets/xml/tasks/test-count.yml
+++ b/tests/integration/targets/xml/tasks/test-count.yml
@@ -3,21 +3,21 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Add child element
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers/beer
- count: true
- register: beers
+- name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers/beer
+ count: true
+ register: beers
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- beers is not changed
- beers.count == 3
diff --git a/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml b/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml
index 475f962ebe..f57e5fa33d 100644
--- a/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml
+++ b/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml
@@ -3,34 +3,34 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers-unicode.xml
- dest: /tmp/ansible-xml-beers-unicode.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers-unicode.xml
+ dest: /tmp/ansible-xml-beers-unicode.xml
- - name: Get element attributes
- xml:
- path: /tmp/ansible-xml-beers-unicode.xml
- xpath: /business/rating
- content: attribute
- register: get_element_attribute
+- name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/rating
+ content: attribute
+ register: get_element_attribute
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- get_element_attribute is not changed
- get_element_attribute.matches[0]['rating'] is defined and get_element_attribute.matches[0]['rating']['subjective'] == 'да'
- - name: Get element text
- xml:
- path: /tmp/ansible-xml-beers-unicode.xml
- xpath: /business/rating
- content: text
- register: get_element_text
+- name: Get element text
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/rating
+ content: text
+ register: get_element_text
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- get_element_text is not changed
- get_element_text.matches[0]['rating'] == 'десять'
diff --git a/tests/integration/targets/xml/tasks/test-get-element-content.yml b/tests/integration/targets/xml/tasks/test-get-element-content.yml
index c75bdb223a..2bef5fd165 100644
--- a/tests/integration/targets/xml/tasks/test-get-element-content.yml
+++ b/tests/integration/targets/xml/tasks/test-get-element-content.yml
@@ -3,49 +3,49 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Get element attributes
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating
- content: attribute
- register: get_element_attribute
+- name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: attribute
+ register: get_element_attribute
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- get_element_attribute is not changed
- get_element_attribute.matches[0]['rating'] is defined
- get_element_attribute.matches[0]['rating']['subjective'] == 'true'
- - name: Get element attributes (should fail)
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating
- content: attribute
- attribute: subjective
- register: get_element_attribute_wrong
- ignore_errors: true
+- name: Get element attributes (should fail)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: attribute
+ attribute: subjective
+ register: get_element_attribute_wrong
+ ignore_errors: true
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- get_element_attribute_wrong is failed
- - name: Get element text
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating
- content: text
- register: get_element_text
+- name: Get element text
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: text
+ register: get_element_text
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- get_element_text is not changed
- get_element_text.matches[0]['rating'] == '10'
diff --git a/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml b/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml
index 33f129e2e6..90bd14f7ed 100644
--- a/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml
+++ b/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml
@@ -3,24 +3,24 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Specify both children to add and a value
- xml:
- path: /tmp/ansible-xml-beers.xml
- add_children:
- - child01
- - child02
- value: conflict!
- register: module_output
- ignore_errors: true
+- name: Specify both children to add and a value
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ add_children:
+ - child01
+ - child02
+ value: conflict!
+ register: module_output
+ ignore_errors: true
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- module_output is not changed
- module_output is failed
diff --git a/tests/integration/targets/xml/tasks/test-pretty-print-only.yml b/tests/integration/targets/xml/tasks/test-pretty-print-only.yml
index 03d3299aa7..bd419e2313 100644
--- a/tests/integration/targets/xml/tasks/test-pretty-print-only.yml
+++ b/tests/integration/targets/xml/tasks/test-pretty-print-only.yml
@@ -3,31 +3,31 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml.orig
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml.orig
- - name: Remove spaces from test fixture
- shell: sed 's/^[ ]*//g' < /tmp/ansible-xml-beers.xml.orig > /tmp/ansible-xml-beers.xml
+- name: Remove spaces from test fixture
+ shell: sed 's/^[ ]*//g' < /tmp/ansible-xml-beers.xml.orig > /tmp/ansible-xml-beers.xml
- - name: Pretty print without modification
- xml:
- path: /tmp/ansible-xml-beers.xml
- pretty_print: true
- register: pretty_print_only
+- name: Pretty print without modification
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ pretty_print: true
+ register: pretty_print_only
- - name: Compare to expected result
- copy:
- src: results/test-pretty-print-only.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-pretty-print-only.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- pretty_print_only is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-pretty-print.yml b/tests/integration/targets/xml/tasks/test-pretty-print.yml
index 51b34502d5..baa8570cf2 100644
--- a/tests/integration/targets/xml/tasks/test-pretty-print.yml
+++ b/tests/integration/targets/xml/tasks/test-pretty-print.yml
@@ -3,32 +3,32 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Pretty print
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- pretty_print: true
- add_children:
+- name: Pretty print
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ pretty_print: true
+ add_children:
- beer: Old Rasputin
- register: pretty_print
+ register: pretty_print
- - name: Compare to expected result
- copy:
- src: results/test-pretty-print.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-pretty-print.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- pretty_print is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml
index 3222bd4368..7e1cc73456 100644
--- a/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml
+++ b/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml
@@ -3,30 +3,30 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: results/test-remove-attribute.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: results/test-remove-attribute.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Remove non-existing '/business/rating/@subjective'
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating/@subjective
- state: absent
- register: remove_attribute
+- name: Remove non-existing '/business/rating/@subjective'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating/@subjective
+ state: absent
+ register: remove_attribute
- - name: Compare to expected result
- copy:
- src: results/test-remove-attribute.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-remove-attribute.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- remove_attribute is not changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-remove-attribute.yml b/tests/integration/targets/xml/tasks/test-remove-attribute.yml
index e8952a655e..9b24a37a9f 100644
--- a/tests/integration/targets/xml/tasks/test-remove-attribute.yml
+++ b/tests/integration/targets/xml/tasks/test-remove-attribute.yml
@@ -3,33 +3,33 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Remove '/business/rating/@subjective'
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating/@subjective
- state: absent
- register: remove_attribute
+- name: Remove '/business/rating/@subjective'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating/@subjective
+ state: absent
+ register: remove_attribute
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-remove-attribute.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-remove-attribute.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- remove_attribute is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml
index c1312c5a75..44b1e95a3f 100644
--- a/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml
+++ b/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml
@@ -3,30 +3,30 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: results/test-remove-element.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Remove non-existing '/business/rating'
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating
- state: absent
- register: remove_element
+- name: Remove non-existing '/business/rating'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ state: absent
+ register: remove_element
- - name: Compare to expected result
- copy:
- src: results/test-remove-element.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- remove_element is not changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-remove-element.yml b/tests/integration/targets/xml/tasks/test-remove-element.yml
index bea376ba93..c2fc081304 100644
--- a/tests/integration/targets/xml/tasks/test-remove-element.yml
+++ b/tests/integration/targets/xml/tasks/test-remove-element.yml
@@ -3,33 +3,33 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Remove '/business/rating'
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating
- state: absent
- register: remove_element
+- name: Remove '/business/rating'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ state: absent
+ register: remove_element
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-remove-element.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- remove_element is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml
index 61b7179ba0..97855ce77b 100644
--- a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml
+++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml
@@ -3,35 +3,35 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: results/test-remove-namespaced-attribute.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
+- name: Setup test fixture
+ copy:
+ src: results/test-remove-namespaced-attribute.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
- - name: Remove non-existing namespaced '/bus:business/rat:rating/@attr:subjective'
- xml:
- path: /tmp/ansible-xml-namespaced-beers.xml
- xpath: /bus:business/rat:rating/@attr:subjective
- namespaces:
- bus: http://test.business
- ber: http://test.beers
- rat: http://test.rating
- attr: http://test.attribute
- state: absent
- register: remove_namespaced_attribute
+- name: Remove non-existing namespaced '/bus:business/rat:rating/@attr:subjective'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating/@attr:subjective
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_attribute
- - name: Compare to expected result
- copy:
- src: results/test-remove-namespaced-attribute.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-remove-namespaced-attribute.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- remove_namespaced_attribute is not changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml
+ # command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml
index a725ee79cf..45e37d41d4 100644
--- a/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml
+++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml
@@ -3,38 +3,38 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-namespaced-beers.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
- - name: Remove namespaced '/bus:business/rat:rating/@attr:subjective'
- xml:
- path: /tmp/ansible-xml-namespaced-beers.xml
- xpath: /bus:business/rat:rating/@attr:subjective
- namespaces:
- bus: http://test.business
- ber: http://test.beers
- rat: http://test.rating
- attr: http://test.attribute
- state: absent
- register: remove_namespaced_attribute
+- name: Remove namespaced '/bus:business/rat:rating/@attr:subjective'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating/@attr:subjective
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_attribute
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-remove-namespaced-attribute.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-remove-namespaced-attribute.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- remove_namespaced_attribute is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml
+ # command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml
index fd83c54c32..bda8643643 100644
--- a/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml
+++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml
@@ -3,35 +3,35 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: results/test-remove-element.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
+- name: Setup test fixture
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
- - name: Remove non-existing namespaced '/bus:business/rat:rating'
- xml:
- path: /tmp/ansible-xml-namespaced-beers.xml
- xpath: /bus:business/rat:rating
- namespaces:
- bus: http://test.business
- ber: http://test.beers
- rat: http://test.rating
- attr: http://test.attribute
- state: absent
- register: remove_namespaced_element
+- name: Remove non-existing namespaced '/bus:business/rat:rating'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_element
- - name: Compare to expected result
- copy:
- src: results/test-remove-element.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- remove_namespaced_element is not changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml
+ # command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml b/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml
index c4129f33e2..4bbab437ca 100644
--- a/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml
+++ b/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml
@@ -3,38 +3,38 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-namespaced-beers.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
- - name: Remove namespaced '/bus:business/rat:rating'
- xml:
- path: /tmp/ansible-xml-namespaced-beers.xml
- xpath: /bus:business/rat:rating
- namespaces:
- bus: http://test.business
- ber: http://test.beers
- rat: http://test.rating
- attr: http://test.attribute
- state: absent
- register: remove_namespaced_element
+- name: Remove namespaced '/bus:business/rat:rating'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_element
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-remove-element.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- remove_namespaced_element is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml
+ # command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml b/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml
index bf35bfdd95..a64442a215 100644
--- a/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml
+++ b/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml
@@ -3,34 +3,34 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Set '/business/rating/@subjective' to 'нет'
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating
- attribute: subjective
- value: нет
- register: set_attribute_value_unicode
+- name: Set '/business/rating/@subjective' to 'нет'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ attribute: subjective
+ value: нет
+ register: set_attribute_value_unicode
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-set-attribute-value-unicode.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-attribute-value-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_attribute_value_unicode is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-attribute-value-unicode.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-attribute-value-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-set-attribute-value.yml b/tests/integration/targets/xml/tasks/test-set-attribute-value.yml
index 2908e00aa3..a5229a61f5 100644
--- a/tests/integration/targets/xml/tasks/test-set-attribute-value.yml
+++ b/tests/integration/targets/xml/tasks/test-set-attribute-value.yml
@@ -3,34 +3,34 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Set '/business/rating/@subjective' to 'false'
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating
- attribute: subjective
- value: 'false'
- register: set_attribute_value
+- name: Set '/business/rating/@subjective' to 'false'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ attribute: subjective
+ value: 'false'
+ register: set_attribute_value
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-set-attribute-value.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-attribute-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_attribute_value is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-attribute-value.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-attribute-value.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml b/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml
index 648f5b25af..48d0de64a3 100644
--- a/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml
+++ b/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml
@@ -3,79 +3,79 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Set child elements
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- set_children: &children
- - beer:
- alcohol: "0.5"
- name: 90 Minute IPA
- _:
- - Water:
- liter: "0.2"
- quantity: 200g
- - Starch:
- quantity: 10g
- - Hops:
- quantity: 50g
- - Yeast:
- quantity: 20g
- - beer:
- alcohol: "0.3"
- name: Harvest Pumpkin Ale
- _:
- - Water:
- liter: "0.2"
- quantity: 200g
- - Hops:
- quantity: 25g
- - Yeast:
- quantity: 20g
- register: set_children_elements_level
+- name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer:
+ alcohol: "0.5"
+ name: 90 Minute IPA
+ _:
+ - Water:
+ liter: "0.2"
+ quantity: 200g
+ - Starch:
+ quantity: 10g
+ - Hops:
+ quantity: 50g
+ - Yeast:
+ quantity: 20g
+ - beer:
+ alcohol: "0.3"
+ name: Harvest Pumpkin Ale
+ _:
+ - Water:
+ liter: "0.2"
+ quantity: 200g
+ - Hops:
+ quantity: 25g
+ - Yeast:
+ quantity: 20g
+ register: set_children_elements_level
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-set-children-elements-level.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-level.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_children_elements_level is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-children-elements-level.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-children-elements-level.xml /tmp/ansible-xml-beers.xml
- - name: Set child elements (again)
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- set_children: *children
- register: set_children_again
+- name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
- - name: Compare to expected result
- copy:
- src: results/test-set-children-elements-level.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-level.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_children_again is not changed
- comparison is not changed # identical
diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml b/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml
index 8c4fc10941..f890c01bc4 100644
--- a/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml
+++ b/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml
@@ -3,51 +3,51 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Set child elements
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- set_children: &children
+- name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
- beer: Окское
- beer: Невское
- register: set_children_elements_unicode
+ register: set_children_elements_unicode
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-set-children-elements-unicode.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_children_elements_unicode is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-set-children-elements-unicode.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_children_again is not changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements-value.yml b/tests/integration/targets/xml/tasks/test-set-children-elements-value.yml
new file mode 100644
index 0000000000..17ed24d283
--- /dev/null
+++ b/tests/integration/targets/xml/tasks/test-set-children-elements-value.yml
@@ -0,0 +1,64 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+- name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer:
+ alcohol: "0.5"
+ name: 90 Minute IPA
+ +value: "2"
+ - beer:
+ alcohol: "0.3"
+ name: Harvest Pumpkin Ale
+ +value: "5"
+ register: set_children_elements_value
+
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+- name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+- name: Test expected result
+ assert:
+ that:
+ - set_children_elements_value is changed
+ - comparison is not changed # identical
+
+
+- name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+- name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+- name: Test expected result
+ assert:
+ that:
+ - set_children_again is not changed
+ - comparison is not changed # identical
diff --git a/tests/integration/targets/xml/tasks/test-set-children-elements.yml b/tests/integration/targets/xml/tasks/test-set-children-elements.yml
index ed9e4a54ee..d2987e83f4 100644
--- a/tests/integration/targets/xml/tasks/test-set-children-elements.yml
+++ b/tests/integration/targets/xml/tasks/test-set-children-elements.yml
@@ -3,84 +3,84 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Set child elements - empty list
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- set_children: []
- register: set_children_elements
+- name: Set child elements - empty list
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: []
+ register: set_children_elements
- - name: Compare to expected result
- copy:
- src: results/test-set-children-elements-empty-list.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-empty-list.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_children_elements is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Set child elements
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- set_children: &children
- - beer: 90 Minute IPA
- - beer: Harvest Pumpkin Ale
- register: set_children_elements
+- name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+ register: set_children_elements
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-set-children-elements.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_children_elements is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
- - name: Set child elements (again)
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/beers
- set_children: *children
- register: set_children_again
+- name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
- - name: Compare to expected result
- copy:
- src: results/test-set-children-elements.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_children_again is not changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml b/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml
index 4041bf9106..3c5212db45 100644
--- a/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml
+++ b/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml
@@ -3,33 +3,33 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Set '/business/website/address' to empty string.
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/website/address
- value: ''
- register: set_element_value_empty
+- name: Set '/business/website/address' to empty string.
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/website/address
+ value: ''
+ register: set_element_value_empty
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-set-element-value-empty.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-element-value-empty.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_element_value_empty is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-element-value-empty.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-element-value-empty.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml b/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml
index 616f26ddc8..7dec91b920 100644
--- a/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml
+++ b/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml
@@ -3,48 +3,48 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Add 2nd '/business/rating' with value 'пять'
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business
- add_children:
+- name: Add 2nd '/business/rating' with value 'пять'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business
+ add_children:
- rating: пять
- - name: Set '/business/rating' to 'пять'
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating
- value: пять
- register: set_element_first_run
+- name: Set '/business/rating' to 'пять'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: пять
+ register: set_element_first_run
- - name: Set '/business/rating' to 'false'... again
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating
- value: пять
- register: set_element_second_run
+- name: Set '/business/rating' to 'пять'... again
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: пять
+ register: set_element_second_run
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-set-element-value-unicode.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-element-value-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_element_first_run is changed
- set_element_second_run is not changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-element-value-unicode.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-element-value-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-set-element-value.yml b/tests/integration/targets/xml/tasks/test-set-element-value.yml
index b563b25766..83b0840bac 100644
--- a/tests/integration/targets/xml/tasks/test-set-element-value.yml
+++ b/tests/integration/targets/xml/tasks/test-set-element-value.yml
@@ -3,48 +3,48 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-beers.xml
- dest: /tmp/ansible-xml-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
- - name: Add 2nd '/business/rating' with value '5'
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business
- add_children:
+- name: Add 2nd '/business/rating' with value '5'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business
+ add_children:
- rating: '5'
- - name: Set '/business/rating' to '5'
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating
- value: '5'
- register: set_element_first_run
+- name: Set '/business/rating' to '5'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: '5'
+ register: set_element_first_run
- - name: Set '/business/rating' to '5'... again
- xml:
- path: /tmp/ansible-xml-beers.xml
- xpath: /business/rating
- value: '5'
- register: set_element_second_run
+- name: Set '/business/rating' to '5'... again
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: '5'
+ register: set_element_second_run
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-set-element-value.xml
- dest: /tmp/ansible-xml-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-element-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_element_first_run is changed
- set_element_second_run is not changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-element-value.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-element-value.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml b/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml
index 7c1bbd2376..0c1992730d 100644
--- a/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml
+++ b/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml
@@ -3,39 +3,39 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-namespaced-beers.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
- - name: Set namespaced '/bus:business/rat:rating/@attr:subjective' to 'false'
- xml:
- path: /tmp/ansible-xml-namespaced-beers.xml
- xpath: /bus:business/rat:rating
- namespaces:
- bus: http://test.business
- ber: http://test.beers
- rat: http://test.rating
- attr: http://test.attribute
- attribute: attr:subjective
- value: 'false'
- register: set_namespaced_attribute_value
+- name: Set namespaced '/bus:business/rat:rating/@attr:subjective' to 'false'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ attribute: attr:subjective
+ value: 'false'
+ register: set_namespaced_attribute_value
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-set-namespaced-attribute-value.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ src: results/test-set-namespaced-attribute-value.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_namespaced_attribute_value is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-set-namespaced-attribute-value.xml /tmp/ansible-xml-namespaced-beers.xml
+ # command: diff -u {{ role_path }}/results/test-set-namespaced-attribute-value.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml b/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml
index e6ed1bdecc..dbda409d07 100644
--- a/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml
+++ b/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml
@@ -3,59 +3,59 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-namespaced-beers.xml
- dest: /tmp/ansible-xml-namespaced-beers-xml.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers-xml.xml
- - name: Set child elements
- xml:
- path: /tmp/ansible-xml-namespaced-beers-xml.xml
- xpath: /bus:business/ber:beers
- namespaces:
- bus: http://test.business
- ber: http://test.beers
- set_children:
+- name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers-xml.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ set_children:
- beer: 90 Minute IPA
- beer: Harvest Pumpkin Ale
- - name: Copy state after first set_children
- copy:
- src: /tmp/ansible-xml-namespaced-beers.xml
- dest: /tmp/ansible-xml-namespaced-beers-1.xml
- remote_src: true
+- name: Copy state after first set_children
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers-1.xml
+ remote_src: true
- - name: Set child elements again
- xml:
- path: /tmp/ansible-xml-namespaced-beers-xml.xml
- xpath: /bus:business/ber:beers
- namespaces:
- bus: http://test.business
- ber: http://test.beers
- set_children:
+- name: Set child elements again
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers-xml.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ set_children:
- beer: 90 Minute IPA
- beer: Harvest Pumpkin Ale
- register: set_children_again
+ register: set_children_again
- - name: Copy state after second set_children
- copy:
- src: /tmp/ansible-xml-namespaced-beers.xml
- dest: /tmp/ansible-xml-namespaced-beers-2.xml
- remote_src: true
+- name: Copy state after second set_children
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers-2.xml
+ remote_src: true
- - name: Compare to expected result
- copy:
- src: /tmp/ansible-xml-namespaced-beers-1.xml
- dest: /tmp/ansible-xml-namespaced-beers-2.xml
- remote_src: true
- check_mode: true
- diff: true
- register: comparison
- #command: diff /tmp/ansible-xml-namespaced-beers-1.xml /tmp/ansible-xml-namespaced-beers-2.xml
+- name: Compare to expected result
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers-1.xml
+ dest: /tmp/ansible-xml-namespaced-beers-2.xml
+ remote_src: true
+ check_mode: true
+ diff: true
+ register: comparison
+ # command: diff /tmp/ansible-xml-namespaced-beers-1.xml /tmp/ansible-xml-namespaced-beers-2.xml
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_children_again is not changed # idempotency
- set_namespaced_attribute_value is changed
- comparison is not changed # identical
diff --git a/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml b/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml
index 9944da8a55..6bdcd2e0e6 100644
--- a/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml
+++ b/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml
@@ -3,51 +3,51 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Setup test fixture
- copy:
- src: fixtures/ansible-xml-namespaced-beers.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
- - name: Set namespaced '/bus:business/rat:rating' to '11'
- xml:
- path: /tmp/ansible-xml-namespaced-beers.xml
- namespaces:
- bus: http://test.business
- ber: http://test.beers
- rat: http://test.rating
- attr: http://test.attribute
- xpath: /bus:business/rat:rating
- value: '11'
- register: set_element_first_run
+- name: Set namespaced '/bus:business/rat:rating' to '11'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ xpath: /bus:business/rat:rating
+ value: '11'
+ register: set_element_first_run
- - name: Set namespaced '/bus:business/rat:rating' to '11' again
- xml:
- path: /tmp/ansible-xml-namespaced-beers.xml
- namespaces:
- bus: http://test.business
- ber: http://test.beers
- rat: http://test.rating
- attr: http://test.attribute
- xpath: /bus:business/rat:rating
- value: '11'
- register: set_element_second_run
+- name: Set namespaced '/bus:business/rat:rating' to '11' again
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ xpath: /bus:business/rat:rating
+ value: '11'
+ register: set_element_second_run
- - name: Add trailing newline
- shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
+- name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
- - name: Compare to expected result
- copy:
- src: results/test-set-namespaced-element-value.xml
- dest: /tmp/ansible-xml-namespaced-beers.xml
- check_mode: true
- diff: true
- register: comparison
- #command: diff -u {{ role_path }}/results/test-set-namespaced-element-value.xml /tmp/ansible-xml-namespaced-beers.xml
+- name: Compare to expected result
+ copy:
+ src: results/test-set-namespaced-element-value.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+ # command: diff -u {{ role_path }}/results/test-set-namespaced-element-value.xml /tmp/ansible-xml-namespaced-beers.xml
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- set_element_first_run is changed
- set_element_second_run is not changed
- comparison is not changed # identical
diff --git a/tests/integration/targets/xml/tasks/test-xmlstring.yml b/tests/integration/targets/xml/tasks/test-xmlstring.yml
index 1c2e4de4a8..c7339742b1 100644
--- a/tests/integration/targets/xml/tasks/test-xmlstring.yml
+++ b/tests/integration/targets/xml/tasks/test-xmlstring.yml
@@ -3,83 +3,83 @@
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
- - name: Copy expected results to remote
- copy:
- src: "results/{{ item }}"
- dest: "/tmp/{{ item }}"
- with_items:
- - test-pretty-print.xml
- - test-pretty-print-only.xml
+- name: Copy expected results to remote
+ copy:
+ src: "results/{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - test-pretty-print.xml
+ - test-pretty-print-only.xml
- # NOTE: Jinja2 templating eats trailing newlines
- - name: Read from xmlstring (not using pretty_print)
- xml:
- xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
- xpath: .
- register: xmlresponse
+# NOTE: Jinja2 templating eats trailing newlines
+- name: Read from xmlstring (not using pretty_print)
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ xpath: .
+ register: xmlresponse
- - name: Compare to expected result
- copy:
- content: "{{ xmlresponse.xmlstring }}\n"
- dest: '/tmp/test-pretty-print-only.xml'
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ content: "{{ xmlresponse.xmlstring }}\n"
+ dest: '/tmp/test-pretty-print-only.xml'
+ check_mode: true
+ diff: true
+ register: comparison
- - name: Test expected result
- assert:
- that:
+- name: Test expected result
+ assert:
+ that:
- xmlresponse is not changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
- # NOTE: Jinja2 templating eats trailing newlines
- - name: Read from xmlstring (using pretty_print)
- xml:
- xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
- pretty_print: true
- register: xmlresponse
+# NOTE: Jinja2 templating eats trailing newlines
+- name: Read from xmlstring (using pretty_print)
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ pretty_print: true
+ register: xmlresponse
- - name: Compare to expected result
- copy:
- content: '{{ xmlresponse.xmlstring }}'
- dest: '/tmp/test-pretty-print-only.xml'
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ content: '{{ xmlresponse.xmlstring }}'
+ dest: '/tmp/test-pretty-print-only.xml'
+ check_mode: true
+ diff: true
+ register: comparison
- # FIXME: This change is related to the newline added by pretty_print
- - name: Test expected result
- assert:
- that:
+# FIXME: This change is related to the newline added by pretty_print
+- name: Test expected result
+ assert:
+ that:
- xmlresponse is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
- # NOTE: Jinja2 templating eats trailing newlines
- - name: Read from xmlstring
- xml:
- xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
- xpath: /business/beers
- pretty_print: true
- add_children:
+# NOTE: Jinja2 templating eats trailing newlines
+- name: Read from xmlstring
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ xpath: /business/beers
+ pretty_print: true
+ add_children:
- beer: Old Rasputin
- register: xmlresponse_modification
+ register: xmlresponse_modification
- - name: Compare to expected result
- copy:
- content: '{{ xmlresponse_modification.xmlstring }}'
- dest: '/tmp/test-pretty-print.xml'
- check_mode: true
- diff: true
- register: comparison
+- name: Compare to expected result
+ copy:
+ content: '{{ xmlresponse_modification.xmlstring }}'
+ dest: '/tmp/test-pretty-print.xml'
+ check_mode: true
+ diff: true
+ register: comparison
- # FIXME: This change is related to the newline added by pretty_print
- - name: Test expected result
- assert:
- that:
+# FIXME: This change is related to the newline added by pretty_print
+- name: Test expected result
+ assert:
+ that:
- xmlresponse_modification is changed
- comparison is not changed # identical
- #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
+ # command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
diff --git a/tests/integration/targets/xml/vars/main.yml b/tests/integration/targets/xml/vars/main.yml
index a8dfc23962..a7b08c3137 100644
--- a/tests/integration/targets/xml/vars/main.yml
+++ b/tests/integration/targets/xml/vars/main.yml
@@ -6,6 +6,6 @@
# -*- mode: yaml -*
bad_beers:
-- beer: "Natty Lite"
-- beer: "Miller Lite"
-- beer: "Coors Lite"
+ - beer: "Natty Lite"
+ - beer: "Miller Lite"
+ - beer: "Coors Lite"
diff --git a/tests/integration/targets/yarn/tasks/run.yml b/tests/integration/targets/yarn/tasks/run.yml
index 0d7d6fb421..d48eacc4d4 100644
--- a/tests/integration/targets/yarn/tasks/run.yml
+++ b/tests/integration/targets/yarn/tasks/run.yml
@@ -33,7 +33,7 @@
# Set vars for our test harness
- vars:
- #node_bin_path: "/usr/local/lib/nodejs/node-v{{nodejs_version}}/bin"
+ # node_bin_path: "/usr/local/lib/nodejs/node-v{{nodejs_version}}/bin"
node_bin_path: "/usr/local/lib/nodejs/{{ nodejs_path }}/bin"
yarn_bin_path: "{{ remote_tmp_dir }}/yarn-v{{ yarn_version }}/bin"
package: 'iconv-lite'
diff --git a/tests/integration/targets/connection_proxmox_pct_remote/aliases b/tests/integration/targets/zpool/aliases
similarity index 57%
rename from tests/integration/targets/connection_proxmox_pct_remote/aliases
rename to tests/integration/targets/zpool/aliases
index d2fefd10c7..083656f786 100644
--- a/tests/integration/targets/connection_proxmox_pct_remote/aliases
+++ b/tests/integration/targets/zpool/aliases
@@ -1,12 +1,15 @@
-# Copyright (c) 2025 Nils Stein (@mietzen)
-# Copyright (c) 2025 Ansible Project
+# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
azp/posix/3
+azp/posix/vm
destructive
-needs/root
-needs/target/connection
-skip/docker
-skip/alpine
+needs/privileged
+skip/aix
+skip/freebsd
+skip/osx
skip/macos
+skip/rhel
+skip/docker
+skip/alpine # TODO: figure out what goes wrong
diff --git a/tests/integration/targets/zpool/defaults/main.yml b/tests/integration/targets/zpool/defaults/main.yml
new file mode 100644
index 0000000000..e55a02c00b
--- /dev/null
+++ b/tests/integration/targets/zpool/defaults/main.yml
@@ -0,0 +1,34 @@
+---
+# Copyright (c) 2025, Tom Hesse
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+zpool_single_disk_config:
+ - "{{ remote_tmp_dir }}/disk0.img"
+
+zpool_mirror_disk_config:
+ - "{{ remote_tmp_dir }}/disk1.img"
+ - "{{ remote_tmp_dir }}/disk2.img"
+
+zpool_raidz_disk_config:
+ - "{{ remote_tmp_dir }}/disk3.img"
+ - "{{ remote_tmp_dir }}/disk4.img"
+
+zpool_vdevs_disk_config:
+ vdev1:
+ - "{{ remote_tmp_dir }}/disk5.img"
+ vdev2:
+ - "{{ remote_tmp_dir }}/disk6.img"
+ vdev3:
+ - "{{ remote_tmp_dir }}/disk7.img"
+ - "{{ remote_tmp_dir }}/disk8.img"
+ vdev4:
+ - "{{ remote_tmp_dir }}/disk9.img"
+ - "{{ remote_tmp_dir }}/disk10.img"
+
+zpool_disk_configs: "{{ zpool_single_disk_config + zpool_mirror_disk_config + zpool_raidz_disk_config + (zpool_vdevs_disk_config.values() | flatten) }}"
+
+zpool_single_disk_pool_name: spool
+zpool_mirror_disk_pool_name: mpool
+zpool_raidz_disk_pool_name: rpool
+zpool_generic_pool_name: tank
diff --git a/tests/integration/targets/zpool/meta/main.yml b/tests/integration/targets/zpool/meta/main.yml
new file mode 100644
index 0000000000..33f3a16566
--- /dev/null
+++ b/tests/integration/targets/zpool/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) 2025, Tom Hesse
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/tests/integration/targets/zpool/tasks/add_remove_vdevs.yml b/tests/integration/targets/zpool/tasks/add_remove_vdevs.yml
new file mode 100644
index 0000000000..f2cd7c55b9
--- /dev/null
+++ b/tests/integration/targets/zpool/tasks/add_remove_vdevs.yml
@@ -0,0 +1,147 @@
+---
+# Copyright (c) 2025, Tom Hesse
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test adding a single disk vdev
+ block:
+ - name: Ensure a single disk pool exists
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ vdevs:
+ - disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
+ state: present
+
+ - name: Add a single disk vdev
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ vdevs:
+ - disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
+ - disks: "{{ zpool_vdevs_disk_config.vdev2 }}"
+ state: present
+
+ - name: Check if vdev was added
+ ansible.builtin.command:
+ cmd: "zpool status -P -L {{ zpool_generic_pool_name }}"
+ register: single_disk_pool_check
+ changed_when: false
+
+ - name: Assert that added disk is present
+ ansible.builtin.assert:
+ that:
+ - "zpool_vdevs_disk_config.vdev2[0] in single_disk_pool_check.stdout"
+
+ - name: Ensure the single disk pool is absent
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ state: absent
+
+- name: Test adding a mirror vdev
+ block:
+ - name: Ensure a single disk pool exists
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ vdevs:
+ - disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
+ state: present
+
+ - name: Add a mirror vdev
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ force: true # This is necessary because of the mismatched replication level
+ vdevs:
+ - disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
+ - type: mirror
+ disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
+ state: present
+
+ - name: Check if vdev was added
+ ansible.builtin.command:
+ cmd: "zpool status -P -L {{ zpool_generic_pool_name }}"
+ register: mirror_pool_check
+ changed_when: false
+
+ - name: Assert that added vdev is present
+ ansible.builtin.assert:
+ that:
+ - "zpool_vdevs_disk_config.vdev3[0] in mirror_pool_check.stdout"
+ - "zpool_vdevs_disk_config.vdev3[1] in mirror_pool_check.stdout"
+
+ - name: Ensure the single disk pool is absent
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ state: absent
+
+- name: Test adding a raidz vdev
+ block:
+ - name: Ensure a single disk pool exists
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ vdevs:
+ - disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
+ state: present
+
+ - name: Add a raidz vdev
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ force: true # This is necessary because of the mismatched replication level
+ vdevs:
+ - disks: "{{ zpool_vdevs_disk_config.vdev1 }}"
+ - type: raidz
+ disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
+ state: present
+
+ - name: Check if vdev was added
+ ansible.builtin.command:
+ cmd: "zpool status -P -L {{ zpool_generic_pool_name }}"
+ register: raidz_pool_check
+ changed_when: false
+
+ - name: Assert that added vdev is present
+ ansible.builtin.assert:
+ that:
+ - "zpool_vdevs_disk_config.vdev3[0] in raidz_pool_check.stdout"
+ - "zpool_vdevs_disk_config.vdev3[1] in raidz_pool_check.stdout"
+
+ - name: Ensure the single disk pool is absent
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ state: absent
+
+- name: Test removing an existing vdev
+ block:
+ - name: Ensure a pool with two mirrored vdevs exists
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ vdevs:
+ - type: mirror
+ disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
+ - type: mirror
+ disks: "{{ zpool_vdevs_disk_config.vdev4 }}"
+ state: present
+
+ - name: Remove a vdev
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ vdevs:
+ - type: mirror
+ disks: "{{ zpool_vdevs_disk_config.vdev4 }}"
+ state: present
+
+ - name: Check if vdev was removed
+ ansible.builtin.command:
+ cmd: "zpool status -P -L {{ zpool_generic_pool_name }}"
+ register: remove_vdev_check
+ changed_when: false
+
+ - name: Assert that removed vdev is absent
+ ansible.builtin.assert:
+ that:
+ - "zpool_vdevs_disk_config.vdev3[0] not in remove_vdev_check.stdout"
+ - "zpool_vdevs_disk_config.vdev3[1] not in remove_vdev_check.stdout"
+ - "'Removal of vdev' in remove_vdev_check.stdout"
+
+ - name: Ensure the pool is absent
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ state: absent
diff --git a/tests/integration/targets/zpool/tasks/create_destroy.yml b/tests/integration/targets/zpool/tasks/create_destroy.yml
new file mode 100644
index 0000000000..f327a4f908
--- /dev/null
+++ b/tests/integration/targets/zpool/tasks/create_destroy.yml
@@ -0,0 +1,123 @@
+---
+# Copyright (c) 2025, Tom Hesse
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test single disk pool creation
+ block:
+ - name: Ensure single disk pool exists
+ community.general.zpool:
+ name: "{{ zpool_single_disk_pool_name }}"
+ vdevs:
+ - disks: "{{ zpool_single_disk_config }}"
+
+ - name: Check if single disk pool exists
+ ansible.builtin.command:
+ cmd: "zpool list -H -o name,health {{ zpool_single_disk_pool_name }}"
+ register: single_disk_pool_check
+ changed_when: false
+
+ - name: Assert that single disk pool is online
+ ansible.builtin.assert:
+ that:
+ - "zpool_single_disk_pool_name in single_disk_pool_check.stdout"
+ - "'ONLINE' in single_disk_pool_check.stdout"
+
+- name: Test mirror disk pool creation
+ block:
+ - name: Ensure mirror disk pool exists
+ community.general.zpool:
+ name: "{{ zpool_mirror_disk_pool_name }}"
+ vdevs:
+ - type: mirror
+ disks: "{{ zpool_mirror_disk_config }}"
+
+ - name: Check if mirror disk pool exists
+ ansible.builtin.command:
+ cmd: "zpool list -H -o name,health {{ zpool_mirror_disk_pool_name }}"
+ register: mirror_disk_pool_check
+ changed_when: false
+
+ - name: Assert that mirror disk pool is online
+ ansible.builtin.assert:
+ that:
+ - "zpool_mirror_disk_pool_name in mirror_disk_pool_check.stdout"
+ - "'ONLINE' in mirror_disk_pool_check.stdout"
+
+- name: Test raidz disk pool creation
+ block:
+ - name: Ensure raidz disk pool exists
+ community.general.zpool:
+ name: "{{ zpool_raidz_disk_pool_name }}"
+ vdevs:
+ - type: raidz
+ disks: "{{ zpool_raidz_disk_config }}"
+
+ - name: Check if raidz disk pool exists
+ ansible.builtin.command:
+ cmd: "zpool list -H -o name,health {{ zpool_raidz_disk_pool_name }}"
+ register: raidz_disk_pool_check
+ changed_when: false
+
+ - name: Assert that raidz disk pool is online
+ ansible.builtin.assert:
+ that:
+ - "zpool_raidz_disk_pool_name in raidz_disk_pool_check.stdout"
+ - "'ONLINE' in raidz_disk_pool_check.stdout"
+
+- name: Test single disk pool deletion
+ block:
+ - name: Ensure single disk pool is absent
+ community.general.zpool:
+ name: "{{ zpool_single_disk_pool_name }}"
+ state: absent
+
+ - name: Check if single disk pool is absent
+ ansible.builtin.command:
+ cmd: "zpool list -H -o name,health {{ zpool_single_disk_pool_name }}"
+ register: single_disk_pool_check
+ ignore_errors: true
+ changed_when: false
+
+ - name: Assert that single disk pool is absent
+ ansible.builtin.assert:
+ that:
+ - "'no such pool' in single_disk_pool_check.stderr"
+
+- name: Test mirror disk pool deletion
+ block:
+ - name: Ensure mirror disk pool is absent
+ community.general.zpool:
+ name: "{{ zpool_mirror_disk_pool_name }}"
+ state: absent
+
+ - name: Check if mirror disk pool is absent
+ ansible.builtin.command:
+ cmd: "zpool list -H -o name,health {{ zpool_mirror_disk_pool_name }}"
+ register: mirror_disk_pool_check
+ ignore_errors: true
+ changed_when: false
+
+ - name: Assert that mirror disk pool is absent
+ ansible.builtin.assert:
+ that:
+ - "'no such pool' in mirror_disk_pool_check.stderr"
+
+- name: Test raidz disk pool deletion
+ block:
+ - name: Ensure raidz disk pool is absent
+ community.general.zpool:
+ name: "{{ zpool_raidz_disk_pool_name }}"
+ state: absent
+
+ - name: Check if raidz disk pool is absent
+ ansible.builtin.command:
+ cmd: "zpool list -H -o name,health {{ zpool_raidz_disk_pool_name }}"
+ register: raidz_disk_pool_check
+ ignore_errors: true
+ changed_when: false
+
+ - name: Assert that raidz disk pool is absent
+ ansible.builtin.assert:
+ that:
+ - "'no such pool' in raidz_disk_pool_check.stderr"
diff --git a/tests/integration/targets/zpool/tasks/install_requirements_alpine.yml b/tests/integration/targets/zpool/tasks/install_requirements_alpine.yml
new file mode 100644
index 0000000000..a734ed4616
--- /dev/null
+++ b/tests/integration/targets/zpool/tasks/install_requirements_alpine.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) 2025, Tom Hesse
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required packages
+ community.general.apk:
+ name:
+ - zfs
+ - zfs-lts
+
+- name: Load zfs module
+ community.general.modprobe:
+ name: zfs
+ state: present
diff --git a/tests/integration/targets/zpool/tasks/install_requirements_ubuntu.yml b/tests/integration/targets/zpool/tasks/install_requirements_ubuntu.yml
new file mode 100644
index 0000000000..435f4752fc
--- /dev/null
+++ b/tests/integration/targets/zpool/tasks/install_requirements_ubuntu.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) 2025, Tom Hesse
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required packages
+ ansible.builtin.apt:
+ name:
+ - zfsutils-linux
+ - util-linux
diff --git a/tests/integration/targets/zpool/tasks/main.yml b/tests/integration/targets/zpool/tasks/main.yml
new file mode 100644
index 0000000000..b5eefc2ffd
--- /dev/null
+++ b/tests/integration/targets/zpool/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+# Copyright (c) 2025, Tom Hesse
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Execute integration tests
+ become: true
+ block:
+ - name: Ensure disk files exists
+ ansible.builtin.command:
+ cmd: "dd if=/dev/zero of={{ item }} bs=1M count=256 conv=fsync"
+ creates: "{{ item }}"
+ loop: "{{ zpool_disk_configs }}"
+
+ - name: Include distribution specific install_requirements.yml
+ ansible.builtin.include_tasks: install_requirements_{{ ansible_distribution | lower }}.yml
+
+ - name: Include create_destroy.yml
+ ansible.builtin.include_tasks: create_destroy.yml
+
+ - name: Include add_remove_vdevs.yml
+ ansible.builtin.include_tasks: add_remove_vdevs.yml
+
+ - name: Include properties.yml
+ ansible.builtin.include_tasks: properties.yml
diff --git a/tests/integration/targets/zpool/tasks/properties.yml b/tests/integration/targets/zpool/tasks/properties.yml
new file mode 100644
index 0000000000..ec7a8c3d7b
--- /dev/null
+++ b/tests/integration/targets/zpool/tasks/properties.yml
@@ -0,0 +1,73 @@
+---
+# Copyright (c) 2025, Tom Hesse
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Ensure pool with two mirrored disks exists
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ pool_properties:
+ ashift: 12
+ filesystem_properties:
+ compression: false
+ vdevs:
+ - type: mirror
+ disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
+ - type: mirror
+ disks: "{{ zpool_vdevs_disk_config.vdev4 }}"
+ state: present
+
+- name: Test changing of a pool property
+ block:
+ - name: Change ashift from 12 to 13
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ pool_properties:
+ ashift: 13
+ vdevs:
+ - type: mirror
+ disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
+ - type: mirror
+ disks: "{{ zpool_vdevs_disk_config.vdev4 }}"
+ state: present
+
+ - name: Check ashift
+ ansible.builtin.command:
+ cmd: "zpool get -H -o value ashift {{ zpool_generic_pool_name }}"
+ changed_when: false
+ register: ashift_check
+
+ - name: Assert ashift has changed
+ ansible.builtin.assert:
+ that:
+ - "'13' in ashift_check.stdout"
+
+- name: Test changing of a dataset property
+ block:
+ - name: Change compression from off to lz4
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ filesystem_properties:
+ compression: lz4
+ vdevs:
+ - type: mirror
+ disks: "{{ zpool_vdevs_disk_config.vdev3 }}"
+ - type: mirror
+ disks: "{{ zpool_vdevs_disk_config.vdev4 }}"
+ state: present
+
+ - name: Check compression
+ ansible.builtin.command:
+ cmd: "zfs get -H -o value compression {{ zpool_generic_pool_name }}"
+ changed_when: false
+ register: compression_check
+
+ - name: Assert compression has changed
+ ansible.builtin.assert:
+ that:
+ - "'lz4' in compression_check.stdout"
+
+- name: Cleanup pool
+ community.general.zpool:
+ name: "{{ zpool_generic_pool_name }}"
+ state: absent
diff --git a/tests/integration/targets/zypper/tasks/zypper.yml b/tests/integration/targets/zypper/tasks/zypper.yml
index 818bdd9f42..ae7dc83b4a 100644
--- a/tests/integration/targets/zypper/tasks/zypper.yml
+++ b/tests/integration/targets/zypper/tasks/zypper.yml
@@ -31,8 +31,8 @@
- name: verify uninstallation of hello
assert:
that:
- - "zypper_result.rc == 0"
- - "rpm_result.rc == 1"
+ - "zypper_result.rc == 0"
+ - "rpm_result.rc == 1"
# UNINSTALL AGAIN
- name: uninstall hello again
@@ -44,7 +44,7 @@
- name: verify no change on re-uninstall
assert:
that:
- - "not zypper_result.changed"
+ - "not zypper_result.changed"
# INSTALL
- name: install hello
@@ -64,9 +64,9 @@
- name: verify installation of hello
assert:
that:
- - "zypper_result.rc == 0"
- - "zypper_result.changed"
- - "rpm_result.rc == 0"
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_result.rc == 0"
# INSTALL AGAIN
- name: install hello again
@@ -78,7 +78,7 @@
- name: verify no change on second install
assert:
that:
- - "not zypper_result.changed"
+ - "not zypper_result.changed"
# Multiple packages
- name: uninstall hello and metamail
@@ -102,8 +102,8 @@
- name: verify packages uninstalled
assert:
that:
- - "rpm_hello_result.rc != 0"
- - "rpm_metamail_result.rc != 0"
+ - "rpm_hello_result.rc != 0"
+ - "rpm_metamail_result.rc != 0"
- name: install hello and metamail
zypper:
@@ -126,10 +126,10 @@
- name: verify packages installed
assert:
that:
- - "zypper_result.rc == 0"
- - "zypper_result.changed"
- - "rpm_hello_result.rc == 0"
- - "rpm_metamail_result.rc == 0"
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_hello_result.rc == 0"
+ - "rpm_metamail_result.rc == 0"
- name: uninstall hello and metamail
zypper:
@@ -190,7 +190,7 @@
state: present
- name: clean zypper RPM cache
- file:
+ file:
name: /var/cache/zypper/RPMS
state: absent
@@ -230,9 +230,9 @@
- name: verify installation of empty
assert:
that:
- - "zypper_result.rc == 0"
- - "zypper_result.changed"
- - "rpm_result.rc == 0"
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_result.rc == 0"
- name: uninstall empty
zypper:
@@ -253,7 +253,7 @@
- name: check that we extract rpm package in testdir folder and folder var is exist
assert:
that:
- - "stat_result.stat.exists == true"
+ - "stat_result.stat.exists == true"
# Build and install an empty rpm with error in post script
- name: uninstall post_error
@@ -267,7 +267,7 @@
state: present
- name: clean zypper RPM cache
- file:
+ file:
name: /var/cache/zypper/RPMS
state: absent
@@ -308,9 +308,9 @@
- name: verify installation of post_error
assert:
that:
- - "zypper_result.rc == 0"
- - "zypper_result.changed"
- - "rpm_result.rc == 0"
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_result.rc == 0"
- name: uninstall post_error
zypper:
@@ -332,9 +332,9 @@
- name: verify installation of post_error
assert:
that:
- - "zypper_result.rc == 107"
- - "not zypper_result.changed"
- - "rpm_result.rc == 0"
+ - "zypper_result.rc == 107"
+ - "not zypper_result.changed"
+ - "rpm_result.rc == 0"
- name: uninstall post_error
zypper:
@@ -354,8 +354,8 @@
state: absent
- name: install and remove in the same run, with +- prefix
- zypper:
- name:
+ zypper:
+ name:
- -hello
- +metamail
state: present
@@ -417,13 +417,13 @@
- name: try rm patch
zypper:
- name: openSUSE-2016-128
+ name: openSUSE-2016-128
type: patch
state: absent
ignore_errors: true
register: zypper_patch
- assert:
- that:
+ that:
- zypper_patch is failed
- zypper_patch.msg.startswith('Can not remove patches.')
@@ -434,7 +434,7 @@
ignore_errors: true
register: zypper_rm
- assert:
- that:
+ that:
- zypper_rm is failed
- zypper_rm.msg.startswith('Can not remove via URL.')
@@ -450,7 +450,7 @@
type: pattern
state: present
register: zypper_install_pattern1
-
+
- name: install pattern update_test again
zypper:
name: update_test
@@ -479,7 +479,7 @@
name: hello
state: present
register: zypperin2
-
+
- assert:
that:
- zypperin1 is succeeded
diff --git a/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml b/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml
index ec362af108..8c322421c4 100644
--- a/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml
+++ b/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml
@@ -218,72 +218,72 @@
# (Maybe 'Uyuni' needs to be replaced with something else?)
- when: ansible_distribution_version is version('15.4', '<')
block:
- - name: add new repository via url to .repo file
- community.general.zypper_repository:
- repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
- state: present
- register: added_by_repo_file
+ - name: add new repository via url to .repo file
+ community.general.zypper_repository:
+ repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
+ state: present
+ register: added_by_repo_file
- - name: get repository details from zypper
- command: zypper lr systemsmanagement_Uyuni_Stable
- register: get_repository_details_from_zypper
+ - name: get repository details from zypper
+ command: zypper lr systemsmanagement_Uyuni_Stable
+ register: get_repository_details_from_zypper
- - name: verify adding via .repo file was successful
- assert:
- that:
- - "added_by_repo_file is changed"
- - "get_repository_details_from_zypper.rc == 0"
- - "'/systemsmanagement:/Uyuni:/Stable/' in get_repository_details_from_zypper.stdout"
+ - name: verify adding via .repo file was successful
+ assert:
+ that:
+ - "added_by_repo_file is changed"
+ - "get_repository_details_from_zypper.rc == 0"
+ - "'/systemsmanagement:/Uyuni:/Stable/' in get_repository_details_from_zypper.stdout"
- - name: add same repository via url to .repo file again to verify idempotency
- community.general.zypper_repository:
- repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
- state: present
- register: added_again_by_repo_file
+ - name: add same repository via url to .repo file again to verify idempotency
+ community.general.zypper_repository:
+ repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
+ state: present
+ register: added_again_by_repo_file
- - name: verify nothing was changed adding a repo with the same .repo file
- assert:
- that:
- - added_again_by_repo_file is not changed
+ - name: verify nothing was changed adding a repo with the same .repo file
+ assert:
+ that:
+ - added_again_by_repo_file is not changed
- - name: remove repository via url to .repo file
- community.general.zypper_repository:
- repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
- state: absent
- register: removed_by_repo_file
+ - name: remove repository via url to .repo file
+ community.general.zypper_repository:
+ repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
+ state: absent
+ register: removed_by_repo_file
- - name: get list of files in /etc/zypp/repos.d/
- command: ls /etc/zypp/repos.d/
- changed_when: false
- register: etc_zypp_reposd
+ - name: get list of files in /etc/zypp/repos.d/
+ command: ls /etc/zypp/repos.d/
+ changed_when: false
+ register: etc_zypp_reposd
- - name: verify removal via .repo file was successful, including cleanup of local .repo file in /etc/zypp/repos.d/
- assert:
- that:
- - "removed_by_repo_file"
- - "'/systemsmanagement:/Uyuni:/Stable/' not in etc_zypp_reposd.stdout"
+ - name: verify removal via .repo file was successful, including cleanup of local .repo file in /etc/zypp/repos.d/
+ assert:
+ that:
+ - "removed_by_repo_file"
+ - "'/systemsmanagement:/Uyuni:/Stable/' not in etc_zypp_reposd.stdout"
# FIXME: THIS DOESN'T SEEM TO WORK ANYMORE WITH ANY OPENSUSE VERSION IN CI!
- when: false
block:
- - name: Copy test .repo file
- copy:
- src: 'files/systemsmanagement_Uyuni_Utils.repo'
- dest: '{{ remote_tmp_dir }}'
+ - name: Copy test .repo file
+ copy:
+ src: 'files/systemsmanagement_Uyuni_Utils.repo'
+ dest: '{{ remote_tmp_dir }}'
- - name: add new repository via local path to .repo file
- community.general.zypper_repository:
- repo: "{{ remote_tmp_dir }}/systemsmanagement_Uyuni_Utils.repo"
- state: present
- register: added_by_repo_local_file
+ - name: add new repository via local path to .repo file
+ community.general.zypper_repository:
+ repo: "{{ remote_tmp_dir }}/systemsmanagement_Uyuni_Utils.repo"
+ state: present
+ register: added_by_repo_local_file
- - name: get repository details for systemsmanagement_Uyuni_Utils from zypper
- command: zypper lr systemsmanagement_Uyuni_Utils
- register: get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils
+ - name: get repository details for systemsmanagement_Uyuni_Utils from zypper
+ command: zypper lr systemsmanagement_Uyuni_Utils
+ register: get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils
- - name: verify adding repository via local .repo file was successful
- assert:
- that:
- - "added_by_repo_local_file is changed"
- - "get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.rc == 0"
- - "'/systemsmanagement:/Uyuni:/Utils/' in get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.stdout"
+ - name: verify adding repository via local .repo file was successful
+ assert:
+ that:
+ - "added_by_repo_local_file is changed"
+ - "get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.rc == 0"
+ - "'/systemsmanagement:/Uyuni:/Utils/' in get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.stdout"
diff --git a/tests/sanity/ignore-2.16.txt b/tests/sanity/ignore-2.16.txt
index 8ac70d76d7..1a4c8f89b1 100644
--- a/tests/sanity/ignore-2.16.txt
+++ b/tests/sanity/ignore-2.16.txt
@@ -1,10 +1,10 @@
+plugins/callback/tasks_only.py yamllint:unparsable-with-libyaml
plugins/connection/wsl.py yamllint:unparsable-with-libyaml
plugins/inventory/gitlab_runners.py yamllint:unparsable-with-libyaml
plugins/inventory/iocage.py yamllint:unparsable-with-libyaml
plugins/inventory/linode.py yamllint:unparsable-with-libyaml
plugins/inventory/lxd.py yamllint:unparsable-with-libyaml
plugins/inventory/nmap.py yamllint:unparsable-with-libyaml
-plugins/inventory/proxmox.py yamllint:unparsable-with-libyaml
plugins/inventory/scaleway.py yamllint:unparsable-with-libyaml
plugins/inventory/virtualbox.py yamllint:unparsable-with-libyaml
plugins/lookup/dependent.py validate-modules:unidiomatic-typecheck
@@ -19,4 +19,5 @@ plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice
plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt'
plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt'
plugins/modules/xfconf.py validate-modules:return-syntax-error
+plugins/test/ansible_type.py yamllint:unparsable-with-libyaml
tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes
diff --git a/tests/sanity/ignore-2.15.txt b/tests/sanity/ignore-2.20.txt
similarity index 54%
rename from tests/sanity/ignore-2.15.txt
rename to tests/sanity/ignore-2.20.txt
index 6115954d00..97751e5a92 100644
--- a/tests/sanity/ignore-2.15.txt
+++ b/tests/sanity/ignore-2.20.txt
@@ -1,21 +1,14 @@
-.azure-pipelines/scripts/publish-codecov.py replace-urlopen
-plugins/connection/wsl.py yamllint:unparsable-with-libyaml
-plugins/inventory/gitlab_runners.py yamllint:unparsable-with-libyaml
-plugins/inventory/iocage.py yamllint:unparsable-with-libyaml
-plugins/inventory/linode.py yamllint:unparsable-with-libyaml
-plugins/inventory/lxd.py yamllint:unparsable-with-libyaml
-plugins/inventory/nmap.py yamllint:unparsable-with-libyaml
-plugins/inventory/proxmox.py yamllint:unparsable-with-libyaml
-plugins/inventory/scaleway.py yamllint:unparsable-with-libyaml
-plugins/inventory/virtualbox.py yamllint:unparsable-with-libyaml
-plugins/lookup/dependent.py validate-modules:unidiomatic-typecheck
plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice
plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt'
+plugins/modules/homectl.py import-3.12 # Uses deprecated stdlib library 'crypt'
plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin
plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen
plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice
plugins/modules/parted.py validate-modules:parameter-state-invalid-choice
plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice
plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt'
+plugins/modules/udm_user.py import-3.12 # Uses deprecated stdlib library 'crypt'
plugins/modules/xfconf.py validate-modules:return-syntax-error
+plugins/module_utils/univention_umc.py pylint:use-yield-from # suggested construct does not work with Python 2
+tests/unit/plugins/modules/uthelper.py pylint:use-yield-from # suggested construct does not work with Python 2
tests/unit/plugins/modules/test_gio_mime.yaml no-smart-quotes
diff --git a/tests/sanity/ignore-2.20.txt.license b/tests/sanity/ignore-2.20.txt.license
new file mode 100644
index 0000000000..edff8c7685
--- /dev/null
+++ b/tests/sanity/ignore-2.20.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/tests/unit/plugins/callback/test_opentelemetry.py b/tests/unit/plugins/callback/test_opentelemetry.py
index 682a051efb..1da506c262 100644
--- a/tests/unit/plugins/callback/test_opentelemetry.py
+++ b/tests/unit/plugins/callback/test_opentelemetry.py
@@ -95,22 +95,6 @@ class TestOpentelemetry(unittest.TestCase):
self.assertEqual(host_data.uuid, 'include')
self.assertEqual(host_data.name, 'include')
self.assertEqual(host_data.status, 'ok')
- self.assertEqual(self.opentelemetry.ansible_version, None)
-
- def test_finish_task_include_with_ansible_version(self):
- task_fields = {'args': {'_ansible_version': '1.2.3'}}
- result = TaskResult(host=None, task=self.mock_task, return_data={}, task_fields=task_fields)
- tasks_data = OrderedDict()
- tasks_data['myuuid'] = self.my_task
-
- self.opentelemetry.finish_task(
- tasks_data,
- 'ok',
- result,
- ""
- )
-
- self.assertEqual(self.opentelemetry.ansible_version, '1.2.3')
def test_get_error_message(self):
test_cases = (
diff --git a/tests/unit/plugins/connection/test_lxc.py b/tests/unit/plugins/connection/test_lxc.py
index 5c8f187691..e65df3315c 100644
--- a/tests/unit/plugins/connection/test_lxc.py
+++ b/tests/unit/plugins/connection/test_lxc.py
@@ -117,14 +117,14 @@ class TestLXCConnectionClass():
# first call initializes the connection
conn._connect()
- assert conn.container_name is container1_name
+ assert conn.container_name == container1_name
assert conn.container is not None
assert conn.container.name == container1_name
container1 = conn.container
# second call is basically a no-op
conn._connect()
- assert conn.container_name is container1_name
+ assert conn.container_name == container1_name
assert conn.container is container1
assert conn.container.name == container1_name
diff --git a/tests/unit/plugins/connection/test_proxmox_pct_remote.py b/tests/unit/plugins/connection/test_proxmox_pct_remote.py
deleted file mode 100644
index c0e8678cdc..0000000000
--- a/tests/unit/plugins/connection/test_proxmox_pct_remote.py
+++ /dev/null
@@ -1,585 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2024 Nils Stein (@mietzen)
-# Copyright (c) 2024 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (annotations, absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-import pytest
-
-from ansible_collections.community.general.plugins.connection.proxmox_pct_remote import authenticity_msg, MyAddPolicy
-from ansible_collections.community.general.plugins.module_utils._filelock import FileLock, LockTimeout
-from ansible.errors import AnsibleError, AnsibleAuthenticationFailure, AnsibleConnectionFailure
-from ansible.module_utils.common.text.converters import to_bytes
-from ansible.module_utils.compat.paramiko import paramiko
-from ansible.playbook.play_context import PlayContext
-from ansible.plugins.loader import connection_loader
-from io import StringIO
-from pathlib import Path
-from unittest.mock import patch, MagicMock, mock_open
-
-
-@pytest.fixture
-def connection():
- play_context = PlayContext()
- in_stream = StringIO()
- conn = connection_loader.get('community.general.proxmox_pct_remote', play_context, in_stream)
- conn.set_option('remote_addr', '192.168.1.100')
- conn.set_option('remote_user', 'root')
- conn.set_option('password', 'password')
- return conn
-
-
-def test_connection_options(connection):
- """ Test that connection options are properly set """
- assert connection.get_option('remote_addr') == '192.168.1.100'
- assert connection.get_option('remote_user') == 'root'
- assert connection.get_option('password') == 'password'
-
-
-def test_authenticity_msg():
- """ Test authenticity message formatting """
- msg = authenticity_msg('test.host', 'ssh-rsa', 'AA:BB:CC:DD')
- assert 'test.host' in msg
- assert 'ssh-rsa' in msg
- assert 'AA:BB:CC:DD' in msg
-
-
-def test_missing_host_key(connection):
- """ Test MyAddPolicy missing_host_key method """
-
- client = MagicMock()
- key = MagicMock()
- key.get_fingerprint.return_value = b'fingerprint'
- key.get_name.return_value = 'ssh-rsa'
-
- policy = MyAddPolicy(connection)
-
- connection.set_option('host_key_auto_add', True)
- policy.missing_host_key(client, 'test.host', key)
- assert hasattr(key, '_added_by_ansible_this_time')
-
- connection.set_option('host_key_auto_add', False)
- connection.set_option('host_key_checking', False)
- policy.missing_host_key(client, 'test.host', key)
-
- connection.set_option('host_key_checking', True)
- connection.set_option('host_key_auto_add', False)
- connection.set_option('use_persistent_connections', False)
-
- with patch('ansible.utils.display.Display.prompt_until', return_value='yes'):
- policy.missing_host_key(client, 'test.host', key)
-
- with patch('ansible.utils.display.Display.prompt_until', return_value='no'):
- with pytest.raises(AnsibleError, match='host connection rejected by user'):
- policy.missing_host_key(client, 'test.host', key)
-
-
-def test_set_log_channel(connection):
- """ Test setting log channel """
- connection._set_log_channel('test_channel')
- assert connection._log_channel == 'test_channel'
-
-
-def test_parse_proxy_command(connection):
- """ Test proxy command parsing """
- connection.set_option('proxy_command', 'ssh -W %h:%p proxy.example.com')
- connection.set_option('remote_addr', 'target.example.com')
- connection.set_option('remote_user', 'testuser')
-
- result = connection._parse_proxy_command(port=2222)
- assert 'sock' in result
- assert isinstance(result['sock'], paramiko.ProxyCommand)
-
-
-@patch('paramiko.SSHClient')
-def test_connect_with_rsa_sha2_disabled(mock_ssh, connection):
- """ Test connection with RSA SHA2 algorithms disabled """
- connection.set_option('use_rsa_sha2_algorithms', False)
- mock_client = MagicMock()
- mock_ssh.return_value = mock_client
-
- connection._connect()
-
- call_kwargs = mock_client.connect.call_args[1]
- assert 'disabled_algorithms' in call_kwargs
- assert 'pubkeys' in call_kwargs['disabled_algorithms']
-
-
-@patch('paramiko.SSHClient')
-def test_connect_with_bad_host_key(mock_ssh, connection):
- """ Test connection with bad host key """
- mock_client = MagicMock()
- mock_ssh.return_value = mock_client
- mock_client.connect.side_effect = paramiko.ssh_exception.BadHostKeyException(
- 'hostname', MagicMock(), MagicMock())
-
- with pytest.raises(AnsibleConnectionFailure, match='host key mismatch'):
- connection._connect()
-
-
-@patch('paramiko.SSHClient')
-def test_connect_with_invalid_host_key(mock_ssh, connection):
- """ Test connection with bad host key """
- connection.set_option('host_key_checking', True)
- mock_client = MagicMock()
- mock_ssh.return_value = mock_client
- mock_client.load_system_host_keys.side_effect = paramiko.hostkeys.InvalidHostKey(
- "Bad Line!", Exception('Something crashed!'))
-
- with pytest.raises(AnsibleConnectionFailure, match="Invalid host key: Bad Line!"):
- connection._connect()
-
-
-@patch('paramiko.SSHClient')
-def test_connect_success(mock_ssh, connection):
- """ Test successful SSH connection establishment """
- mock_client = MagicMock()
- mock_ssh.return_value = mock_client
-
- connection._connect()
-
- assert mock_client.connect.called
- assert connection._connected
-
-
-@patch('paramiko.SSHClient')
-def test_connect_authentication_failure(mock_ssh, connection):
- """ Test SSH connection with authentication failure """
- mock_client = MagicMock()
- mock_ssh.return_value = mock_client
- mock_client.connect.side_effect = paramiko.ssh_exception.AuthenticationException('Auth failed')
-
- with pytest.raises(AnsibleAuthenticationFailure):
- connection._connect()
-
-
-def test_any_keys_added(connection):
- """ Test checking for added host keys """
- connection.ssh = MagicMock()
- connection.ssh._host_keys = {
- 'host1': {
- 'ssh-rsa': MagicMock(_added_by_ansible_this_time=True),
- 'ssh-ed25519': MagicMock(_added_by_ansible_this_time=False)
- }
- }
-
- assert connection._any_keys_added() is True
-
- connection.ssh._host_keys = {
- 'host1': {
- 'ssh-rsa': MagicMock(_added_by_ansible_this_time=False)
- }
- }
- assert connection._any_keys_added() is False
-
-
-@patch('os.path.exists')
-@patch('os.stat')
-@patch('tempfile.NamedTemporaryFile')
-def test_save_ssh_host_keys(mock_tempfile, mock_stat, mock_exists, connection):
- """ Test saving SSH host keys """
- mock_exists.return_value = True
- mock_stat.return_value = MagicMock(st_mode=0o644, st_uid=1000, st_gid=1000)
- mock_tempfile.return_value.__enter__.return_value.name = '/tmp/test_keys'
-
- connection.ssh = MagicMock()
- connection.ssh._host_keys = {
- 'host1': {
- 'ssh-rsa': MagicMock(
- get_base64=lambda: 'KEY1',
- _added_by_ansible_this_time=True
- )
- }
- }
-
- mock_open_obj = mock_open()
- with patch('builtins.open', mock_open_obj):
- connection._save_ssh_host_keys('/tmp/test_keys')
-
- mock_open_obj().write.assert_called_with('host1 ssh-rsa KEY1\n')
-
-
-def test_build_pct_command(connection):
- """ Test PCT command building with different users """
- connection.set_option('vmid', '100')
-
- cmd = connection._build_pct_command('/bin/sh -c "ls -la"')
- assert cmd == '/usr/sbin/pct exec 100 -- /bin/sh -c "ls -la"'
-
- connection.set_option('remote_user', 'user')
- connection.set_option('proxmox_become_method', 'sudo')
- cmd = connection._build_pct_command('/bin/sh -c "ls -la"')
- assert cmd == 'sudo /usr/sbin/pct exec 100 -- /bin/sh -c "ls -la"'
-
-
-@patch('paramiko.SSHClient')
-def test_exec_command_success(mock_ssh, connection):
- """ Test successful command execution """
- mock_client = MagicMock()
- mock_ssh.return_value = mock_client
- mock_channel = MagicMock()
- mock_transport = MagicMock()
-
- mock_client.get_transport.return_value = mock_transport
- mock_transport.open_session.return_value = mock_channel
- mock_channel.recv_exit_status.return_value = 0
- mock_channel.makefile.return_value = [to_bytes('stdout')]
- mock_channel.makefile_stderr.return_value = [to_bytes("")]
-
- connection._connected = True
- connection.ssh = mock_client
-
- returncode, stdout, stderr = connection.exec_command('ls -la')
-
- mock_transport.open_session.assert_called_once()
- mock_channel.get_pty.assert_called_once()
- mock_transport.set_keepalive.assert_called_once_with(5)
-
-
-@patch('paramiko.SSHClient')
-def test_exec_command_pct_not_found(mock_ssh, connection):
- """ Test command execution when PCT is not found """
- mock_client = MagicMock()
- mock_ssh.return_value = mock_client
- mock_channel = MagicMock()
- mock_transport = MagicMock()
-
- mock_client.get_transport.return_value = mock_transport
- mock_transport.open_session.return_value = mock_channel
- mock_channel.recv_exit_status.return_value = 1
- mock_channel.makefile.return_value = [to_bytes("")]
- mock_channel.makefile_stderr.return_value = [to_bytes('pct: not found')]
-
- connection._connected = True
- connection.ssh = mock_client
-
- with pytest.raises(AnsibleError, match='pct not found in path of host'):
- connection.exec_command('ls -la')
-
-
-@patch('paramiko.SSHClient')
-def test_exec_command_session_open_failure(mock_ssh, connection):
- """ Test exec_command when session opening fails """
- mock_client = MagicMock()
- mock_transport = MagicMock()
- mock_transport.open_session.side_effect = Exception('Failed to open session')
- mock_client.get_transport.return_value = mock_transport
-
- connection._connected = True
- connection.ssh = mock_client
-
- with pytest.raises(AnsibleConnectionFailure, match='Failed to open session'):
- connection.exec_command('test command')
-
-
-@patch('paramiko.SSHClient')
-def test_exec_command_with_privilege_escalation(mock_ssh, connection):
- """ Test exec_command with privilege escalation """
- mock_client = MagicMock()
- mock_channel = MagicMock()
- mock_transport = MagicMock()
-
- mock_client.get_transport.return_value = mock_transport
- mock_transport.open_session.return_value = mock_channel
- connection._connected = True
- connection.ssh = mock_client
-
- connection.become = MagicMock()
- connection.become.expect_prompt.return_value = True
- connection.become.check_success.return_value = False
- connection.become.check_password_prompt.return_value = True
- connection.become.get_option.return_value = 'sudo_password'
-
- mock_channel.recv.return_value = b'[sudo] password:'
- mock_channel.recv_exit_status.return_value = 0
- mock_channel.makefile.return_value = [b""]
- mock_channel.makefile_stderr.return_value = [b""]
-
- returncode, stdout, stderr = connection.exec_command('sudo test command')
-
- mock_channel.sendall.assert_called_once_with(b'sudo_password\n')
-
-
-def test_put_file(connection):
- """ Test putting a file to the remote system """
- connection.exec_command = MagicMock()
- connection.exec_command.return_value = (0, b"", b"")
-
- with patch('builtins.open', create=True) as mock_open:
- mock_open.return_value.__enter__.return_value.read.return_value = b'test content'
- connection.put_file('/local/path', '/remote/path')
-
- connection.exec_command.assert_called_once_with("/bin/sh -c 'cat > /remote/path'", in_data=b'test content', sudoable=False)
-
-
-@patch('paramiko.SSHClient')
-def test_put_file_general_error(mock_ssh, connection):
- """ Test put_file with general error """
- mock_client = MagicMock()
- mock_ssh.return_value = mock_client
- mock_channel = MagicMock()
- mock_transport = MagicMock()
-
- mock_client.get_transport.return_value = mock_transport
- mock_transport.open_session.return_value = mock_channel
- mock_channel.recv_exit_status.return_value = 1
- mock_channel.makefile.return_value = [to_bytes("")]
- mock_channel.makefile_stderr.return_value = [to_bytes('Some error')]
-
- connection._connected = True
- connection.ssh = mock_client
-
- with pytest.raises(AnsibleError, match='error occurred while putting file from /remote/path to /local/path'):
- connection.put_file('/remote/path', '/local/path')
-
-
-@patch('paramiko.SSHClient')
-def test_put_file_cat_not_found(mock_ssh, connection):
- """ Test command execution when cat is not found """
- mock_client = MagicMock()
- mock_ssh.return_value = mock_client
- mock_channel = MagicMock()
- mock_transport = MagicMock()
-
- mock_client.get_transport.return_value = mock_transport
- mock_transport.open_session.return_value = mock_channel
- mock_channel.recv_exit_status.return_value = 1
- mock_channel.makefile.return_value = [to_bytes("")]
- mock_channel.makefile_stderr.return_value = [to_bytes('cat: not found')]
-
- connection._connected = True
- connection.ssh = mock_client
-
- with pytest.raises(AnsibleError, match='cat not found in path of container:'):
- connection.fetch_file('/remote/path', '/local/path')
-
-
-def test_fetch_file(connection):
- """ Test fetching a file from the remote system """
- connection.exec_command = MagicMock()
- connection.exec_command.return_value = (0, b'test content', b"")
-
- with patch('builtins.open', create=True) as mock_open:
- connection.fetch_file('/remote/path', '/local/path')
-
- connection.exec_command.assert_called_once_with("/bin/sh -c 'cat /remote/path'", sudoable=False)
- mock_open.assert_called_with('/local/path', 'wb')
-
-
-@patch('paramiko.SSHClient')
-def test_fetch_file_general_error(mock_ssh, connection):
- """ Test fetch_file with general error """
- mock_client = MagicMock()
- mock_ssh.return_value = mock_client
- mock_channel = MagicMock()
- mock_transport = MagicMock()
-
- mock_client.get_transport.return_value = mock_transport
- mock_transport.open_session.return_value = mock_channel
- mock_channel.recv_exit_status.return_value = 1
- mock_channel.makefile.return_value = [to_bytes("")]
- mock_channel.makefile_stderr.return_value = [to_bytes('Some error')]
-
- connection._connected = True
- connection.ssh = mock_client
-
- with pytest.raises(AnsibleError, match='error occurred while fetching file from /remote/path to /local/path'):
- connection.fetch_file('/remote/path', '/local/path')
-
-
-@patch('paramiko.SSHClient')
-def test_fetch_file_cat_not_found(mock_ssh, connection):
- """ Test command execution when cat is not found """
- mock_client = MagicMock()
- mock_ssh.return_value = mock_client
- mock_channel = MagicMock()
- mock_transport = MagicMock()
-
- mock_client.get_transport.return_value = mock_transport
- mock_transport.open_session.return_value = mock_channel
- mock_channel.recv_exit_status.return_value = 1
- mock_channel.makefile.return_value = [to_bytes("")]
- mock_channel.makefile_stderr.return_value = [to_bytes('cat: not found')]
-
- connection._connected = True
- connection.ssh = mock_client
-
- with pytest.raises(AnsibleError, match='cat not found in path of container:'):
- connection.fetch_file('/remote/path', '/local/path')
-
-
-def test_close(connection):
- """ Test connection close """
- mock_ssh = MagicMock()
- connection.ssh = mock_ssh
- connection._connected = True
-
- connection.close()
-
- assert mock_ssh.close.called, 'ssh.close was not called'
- assert not connection._connected, 'self._connected is still True'
-
-
-def test_close_with_lock_file(connection):
- """ Test close method with lock file creation """
- connection._any_keys_added = MagicMock(return_value=True)
- connection._connected = True
- connection.keyfile = '/tmp/pct-remote-known_hosts-test'
- connection.set_option('host_key_checking', True)
- connection.set_option('lock_file_timeout', 5)
- connection.set_option('record_host_keys', True)
- connection.ssh = MagicMock()
-
- lock_file_path = os.path.join(os.path.dirname(connection.keyfile),
- f'ansible-{os.path.basename(connection.keyfile)}.lock')
-
- try:
- connection.close()
- assert os.path.exists(lock_file_path), 'Lock file was not created'
-
- lock_stat = os.stat(lock_file_path)
- assert lock_stat.st_mode & 0o777 == 0o600, 'Incorrect lock file permissions'
- finally:
- Path(lock_file_path).unlink(missing_ok=True)
-
-
-@patch('pathlib.Path.unlink')
-@patch('os.path.exists')
-def test_close_lock_file_time_out_error_handling(mock_exists, mock_unlink, connection):
- """ Test close method with lock file timeout error """
- connection._any_keys_added = MagicMock(return_value=True)
- connection._connected = True
- connection._save_ssh_host_keys = MagicMock()
- connection.keyfile = '/tmp/pct-remote-known_hosts-test'
- connection.set_option('host_key_checking', True)
- connection.set_option('lock_file_timeout', 5)
- connection.set_option('record_host_keys', True)
- connection.ssh = MagicMock()
-
- mock_exists.return_value = False
- matcher = f'writing lock file for {connection.keyfile} ran in to the timeout of {connection.get_option("lock_file_timeout")}s'
- with pytest.raises(AnsibleError, match=matcher):
- with patch('os.getuid', return_value=1000), \
- patch('os.getgid', return_value=1000), \
- patch('os.chmod'), patch('os.chown'), \
- patch('os.rename'), \
- patch.object(FileLock, 'lock_file', side_effect=LockTimeout()):
- connection.close()
-
-
-@patch('ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file')
-@patch('tempfile.NamedTemporaryFile')
-@patch('os.chmod')
-@patch('os.chown')
-@patch('os.rename')
-@patch('os.path.exists')
-def test_tempfile_creation_and_move(mock_exists, mock_rename, mock_chown, mock_chmod, mock_tempfile, mock_lock_file, connection):
- """ Test tempfile creation and move during close """
- connection._any_keys_added = MagicMock(return_value=True)
- connection._connected = True
- connection._save_ssh_host_keys = MagicMock()
- connection.keyfile = '/tmp/pct-remote-known_hosts-test'
- connection.set_option('host_key_checking', True)
- connection.set_option('lock_file_timeout', 5)
- connection.set_option('record_host_keys', True)
- connection.ssh = MagicMock()
-
- mock_exists.return_value = False
-
- mock_lock_file_instance = MagicMock()
- mock_lock_file.return_value = mock_lock_file_instance
- mock_lock_file_instance.__enter__.return_value = None
-
- mock_tempfile_instance = MagicMock()
- mock_tempfile_instance.name = '/tmp/mock_tempfile'
- mock_tempfile.return_value.__enter__.return_value = mock_tempfile_instance
-
- mode = 0o644
- uid = 1000
- gid = 1000
- key_dir = os.path.dirname(connection.keyfile)
-
- with patch('os.getuid', return_value=uid), patch('os.getgid', return_value=gid):
- connection.close()
-
- connection._save_ssh_host_keys.assert_called_once_with('/tmp/mock_tempfile')
- mock_chmod.assert_called_once_with('/tmp/mock_tempfile', mode)
- mock_chown.assert_called_once_with('/tmp/mock_tempfile', uid, gid)
- mock_rename.assert_called_once_with('/tmp/mock_tempfile', connection.keyfile)
- mock_tempfile.assert_called_once_with(dir=key_dir, delete=False)
-
-
-@patch('pathlib.Path.unlink')
-@patch('tempfile.NamedTemporaryFile')
-@patch('ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file')
-@patch('os.path.exists')
-def test_close_tempfile_error_handling(mock_exists, mock_lock_file, mock_tempfile, mock_unlink, connection):
- """ Test tempfile creation error """
- connection._any_keys_added = MagicMock(return_value=True)
- connection._connected = True
- connection._save_ssh_host_keys = MagicMock()
- connection.keyfile = '/tmp/pct-remote-known_hosts-test'
- connection.set_option('host_key_checking', True)
- connection.set_option('lock_file_timeout', 5)
- connection.set_option('record_host_keys', True)
- connection.ssh = MagicMock()
-
- mock_exists.return_value = False
-
- mock_lock_file_instance = MagicMock()
- mock_lock_file.return_value = mock_lock_file_instance
- mock_lock_file_instance.__enter__.return_value = None
-
- mock_tempfile_instance = MagicMock()
- mock_tempfile_instance.name = '/tmp/mock_tempfile'
- mock_tempfile.return_value.__enter__.return_value = mock_tempfile_instance
-
- with pytest.raises(AnsibleError, match='error occurred while writing SSH host keys!'):
- with patch.object(os, 'chmod', side_effect=Exception()):
- connection.close()
- mock_unlink.assert_called_with(missing_ok=True)
-
-
-@patch('ansible_collections.community.general.plugins.module_utils._filelock.FileLock.lock_file')
-@patch('os.path.exists')
-def test_close_with_invalid_host_key(mock_exists, mock_lock_file, connection):
- """ Test load_system_host_keys on close with InvalidHostKey error """
- connection._any_keys_added = MagicMock(return_value=True)
- connection._connected = True
- connection._save_ssh_host_keys = MagicMock()
- connection.keyfile = '/tmp/pct-remote-known_hosts-test'
- connection.set_option('host_key_checking', True)
- connection.set_option('lock_file_timeout', 5)
- connection.set_option('record_host_keys', True)
- connection.ssh = MagicMock()
- connection.ssh.load_system_host_keys.side_effect = paramiko.hostkeys.InvalidHostKey(
- "Bad Line!", Exception('Something crashed!'))
-
- mock_exists.return_value = False
-
- mock_lock_file_instance = MagicMock()
- mock_lock_file.return_value = mock_lock_file_instance
- mock_lock_file_instance.__enter__.return_value = None
-
- with pytest.raises(AnsibleConnectionFailure, match="Invalid host key: Bad Line!"):
- connection.close()
-
-
-def test_reset(connection):
- """ Test connection reset """
- connection._connected = True
- connection.close = MagicMock()
- connection._connect = MagicMock()
-
- connection.reset()
-
- connection.close.assert_called_once()
- connection._connect.assert_called_once()
-
- connection._connected = False
- connection.reset()
- assert connection.close.call_count == 1
diff --git a/tests/unit/plugins/connection/test_wsl.py b/tests/unit/plugins/connection/test_wsl.py
index 768f9a8d27..c28d1fbec9 100644
--- a/tests/unit/plugins/connection/test_wsl.py
+++ b/tests/unit/plugins/connection/test_wsl.py
@@ -12,12 +12,14 @@ from ansible_collections.community.general.plugins.connection.wsl import authent
from ansible_collections.community.general.plugins.module_utils._filelock import FileLock, LockTimeout
from ansible.errors import AnsibleError, AnsibleAuthenticationFailure, AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_bytes
-from ansible.module_utils.compat.paramiko import paramiko
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import connection_loader
from io import StringIO
from pathlib import Path
-from unittest.mock import patch, MagicMock, mock_open
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, MagicMock, mock_open
+
+
+paramiko = pytest.importorskip('paramiko')
@pytest.fixture
diff --git a/tests/unit/plugins/inventory/test_linode.py b/tests/unit/plugins/inventory/test_linode.py
index 0f239f2dd9..ead41591a7 100644
--- a/tests/unit/plugins/inventory/test_linode.py
+++ b/tests/unit/plugins/inventory/test_linode.py
@@ -7,14 +7,8 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
-import sys
linode_apiv4 = pytest.importorskip('linode_api4')
-mandatory_py_version = pytest.mark.skipif(
- sys.version_info < (2, 7),
- reason='The linode_api4 dependency requires python2.7 or higher'
-)
-
from ansible.errors import AnsibleError
from ansible.parsing.dataloader import DataLoader
diff --git a/tests/unit/plugins/inventory/test_proxmox.py b/tests/unit/plugins/inventory/test_proxmox.py
deleted file mode 100644
index b8358df226..0000000000
--- a/tests/unit/plugins/inventory/test_proxmox.py
+++ /dev/null
@@ -1,786 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2020, Jeffrey van Pelt
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# The API responses used in these tests were recorded from PVE version 6.2.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-
-from ansible.inventory.data import InventoryData
-from ansible_collections.community.general.plugins.inventory.proxmox import InventoryModule
-
-
-@pytest.fixture(scope="module")
-def inventory():
- r = InventoryModule()
- r.inventory = InventoryData()
- return r
-
-
-def test_verify_file(tmp_path, inventory):
- file = tmp_path / "foobar.proxmox.yml"
- file.touch()
- assert inventory.verify_file(str(file)) is True
-
-
-def test_verify_file_bad_config(inventory):
- assert inventory.verify_file('foobar.proxmox.yml') is False
-
-
-def get_auth():
- return True
-
-
-# NOTE: when updating/adding replies to this function,
-# be sure to only add only the _contents_ of the 'data' dict in the API reply
-def get_json(url, ignore_errors=None):
- if url == "https://localhost:8006/api2/json/nodes":
- # _get_nodes
- return [{"type": "node",
- "cpu": 0.01,
- "maxdisk": 500,
- "mem": 500,
- "node": "testnode",
- "id": "node/testnode",
- "maxcpu": 1,
- "status": "online",
- "ssl_fingerprint": "xx",
- "disk": 1000,
- "maxmem": 1000,
- "uptime": 10000,
- "level": ""},
- {"type": "node",
- "node": "testnode2",
- "id": "node/testnode2",
- "status": "offline",
- "ssl_fingerprint": "yy"}]
- elif url == "https://localhost:8006/api2/json/pools":
- # _get_pools
- return [{"poolid": "test"}]
- elif url == "https://localhost:8006/api2/json/nodes/testnode/lxc":
- # _get_lxc_per_node
- return [{"cpus": 1,
- "name": "test-lxc",
- "cpu": 0.01,
- "diskwrite": 0,
- "lock": "",
- "maxmem": 1000,
- "template": "",
- "diskread": 0,
- "mem": 1000,
- "swap": 0,
- "type": "lxc",
- "maxswap": 0,
- "maxdisk": "1000",
- "netout": 1000,
- "pid": "1000",
- "netin": 1000,
- "status": "running",
- "vmid": "100",
- "disk": "1000",
- "uptime": 1000}]
- elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu":
- # _get_qemu_per_node
- return [{"name": "test-qemu",
- "cpus": 1,
- "mem": 1000,
- "template": "",
- "diskread": 0,
- "cpu": 0.01,
- "maxmem": 1000,
- "diskwrite": 0,
- "netout": 1000,
- "pid": "1001",
- "netin": 1000,
- "maxdisk": 1000,
- "vmid": "101",
- "uptime": 1000,
- "disk": 0,
- "status": "running"},
- {"name": "test-qemu-windows",
- "cpus": 1,
- "mem": 1000,
- "template": "",
- "diskread": 0,
- "cpu": 0.01,
- "maxmem": 1000,
- "diskwrite": 0,
- "netout": 1000,
- "pid": "1001",
- "netin": 1000,
- "maxdisk": 1000,
- "vmid": "102",
- "uptime": 1000,
- "disk": 0,
- "status": "running"},
- {"name": "test-qemu-multi-nic",
- "cpus": 1,
- "mem": 1000,
- "template": "",
- "diskread": 0,
- "cpu": 0.01,
- "maxmem": 1000,
- "diskwrite": 0,
- "netout": 1000,
- "pid": "1001",
- "netin": 1000,
- "maxdisk": 1000,
- "vmid": "103",
- "uptime": 1000,
- "disk": 0,
- "status": "running"},
- {"name": "test-qemu-template",
- "cpus": 1,
- "mem": 0,
- "template": 1,
- "diskread": 0,
- "cpu": 0,
- "maxmem": 1000,
- "diskwrite": 0,
- "netout": 0,
- "pid": "1001",
- "netin": 0,
- "maxdisk": 1000,
- "vmid": "9001",
- "uptime": 0,
- "disk": 0,
- "status": "stopped"}]
- elif url == "https://localhost:8006/api2/json/pools/test":
- # _get_members_per_pool
- return {"members": [{"uptime": 1000,
- "template": 0,
- "id": "qemu/101",
- "mem": 1000,
- "status": "running",
- "cpu": 0.01,
- "maxmem": 1000,
- "diskwrite": 1000,
- "name": "test-qemu",
- "netout": 1000,
- "netin": 1000,
- "vmid": 101,
- "node": "testnode",
- "maxcpu": 1,
- "type": "qemu",
- "maxdisk": 1000,
- "disk": 0,
- "diskread": 1000}]}
- elif url == "https://localhost:8006/api2/json/nodes/testnode/network":
- # _get_node_ip
- return [{"families": ["inet"],
- "priority": 3,
- "active": 1,
- "cidr": "10.1.1.2/24",
- "iface": "eth0",
- "method": "static",
- "exists": 1,
- "type": "eth",
- "netmask": "24",
- "gateway": "10.1.1.1",
- "address": "10.1.1.2",
- "method6": "manual",
- "autostart": 1},
- {"method6": "manual",
- "autostart": 1,
- "type": "OVSPort",
- "exists": 1,
- "method": "manual",
- "iface": "eth1",
- "ovs_bridge": "vmbr0",
- "active": 1,
- "families": ["inet"],
- "priority": 5,
- "ovs_type": "OVSPort"},
- {"type": "OVSBridge",
- "method": "manual",
- "iface": "vmbr0",
- "families": ["inet"],
- "priority": 4,
- "ovs_ports": "eth1",
- "ovs_type": "OVSBridge",
- "method6": "manual",
- "autostart": 1,
- "active": 1}]
- elif url == "https://localhost:8006/api2/json/nodes/testnode/lxc/100/config":
- # _get_vm_config (lxc)
- return {
- "console": 1,
- "rootfs": "local-lvm:vm-100-disk-0,size=4G",
- "cmode": "tty",
- "description": "A testnode",
- "cores": 1,
- "hostname": "test-lxc",
- "arch": "amd64",
- "tty": 2,
- "swap": 0,
- "cpulimit": "0",
- "net0": "name=eth0,bridge=vmbr0,gw=10.1.1.1,hwaddr=FF:FF:FF:FF:FF:FF,ip=10.1.1.3/24,type=veth",
- "ostype": "ubuntu",
- "digest": "123456789abcdef0123456789abcdef01234567890",
- "protection": 0,
- "memory": 1000,
- "onboot": 0,
- "cpuunits": 1024,
- "tags": "one, two, three",
- }
- elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/101/config":
- # _get_vm_config (qemu)
- return {
- "tags": "one, two, three",
- "cores": 1,
- "ide2": "none,media=cdrom",
- "memory": 1000,
- "kvm": 1,
- "digest": "0123456789abcdef0123456789abcdef0123456789",
- "description": "A test qemu",
- "sockets": 1,
- "onboot": 1,
- "vmgenid": "ffffffff-ffff-ffff-ffff-ffffffffffff",
- "numa": 0,
- "bootdisk": "scsi0",
- "cpu": "host",
- "name": "test-qemu",
- "ostype": "l26",
- "hotplug": "network,disk,usb",
- "scsi0": "local-lvm:vm-101-disk-0,size=8G",
- "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0,firewall=1",
- "agent": "1,fstrim_cloned_disks=1",
- "bios": "seabios",
- "ide0": "local-lvm:vm-101-cloudinit,media=cdrom,size=4M",
- "boot": "cdn",
- "scsihw": "virtio-scsi-pci",
- "smbios1": "uuid=ffffffff-ffff-ffff-ffff-ffffffffffff"
- }
- elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/102/config":
- # _get_vm_config (qemu)
- return {
- "numa": 0,
- "digest": "460add1531a7068d2ae62d54f67e8fb9493dece9",
- "ide2": "none,media=cdrom",
- "bootdisk": "sata0",
- "name": "test-qemu-windows",
- "balloon": 0,
- "cpulimit": "4",
- "agent": "1",
- "cores": 6,
- "sata0": "storage:vm-102-disk-0,size=100G",
- "memory": 10240,
- "smbios1": "uuid=127301fc-0122-48d5-8fc5-c04fa78d8146",
- "scsihw": "virtio-scsi-pci",
- "sockets": 1,
- "ostype": "win8",
- "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0",
- "onboot": 1
- }
- elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/103/config":
- # _get_vm_config (qemu)
- return {
- 'scsi1': 'storage:vm-103-disk-3,size=30G',
- 'sockets': 1,
- 'memory': 8192,
- 'ostype': 'l26',
- 'scsihw': 'virtio-scsi-pci',
- "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0",
- "net1": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr1",
- 'bootdisk': 'scsi0',
- 'scsi0': 'storage:vm-103-disk-0,size=10G',
- 'name': 'test-qemu-multi-nic',
- 'cores': 4,
- 'digest': '51b7599f869b9a3f564804a0aed290f3de803292',
- 'smbios1': 'uuid=863b31c3-42ca-4a92-aed7-4111f342f70a',
- 'agent': '1,type=virtio',
- 'ide2': 'none,media=cdrom',
- 'balloon': 0,
- 'numa': 0,
- 'scsi2': 'storage:vm-103-disk-2,size=10G',
- 'serial0': 'socket',
- 'vmgenid': 'ddfb79b2-b484-4d66-88e7-6e76f2d1be77',
- 'onboot': 1,
- 'tablet': 0
- }
-
- elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/101/agent/network-get-interfaces":
- # _get_agent_network_interfaces
- return {"result": [
- {
- "hardware-address": "00:00:00:00:00:00",
- "ip-addresses": [
- {
- "prefix": 8,
- "ip-address-type": "ipv4",
- "ip-address": "127.0.0.1"
- },
- {
- "ip-address-type": "ipv6",
- "ip-address": "::1",
- "prefix": 128
- }],
- "statistics": {
- "rx-errs": 0,
- "rx-bytes": 163244,
- "rx-packets": 1623,
- "rx-dropped": 0,
- "tx-dropped": 0,
- "tx-packets": 1623,
- "tx-bytes": 163244,
- "tx-errs": 0},
- "name": "lo"},
- {
- "statistics": {
- "rx-packets": 4025,
- "rx-dropped": 12,
- "rx-bytes": 324105,
- "rx-errs": 0,
- "tx-errs": 0,
- "tx-bytes": 368860,
- "tx-packets": 3479,
- "tx-dropped": 0},
- "name": "eth0",
- "ip-addresses": [
- {
- "prefix": 24,
- "ip-address-type": "ipv4",
- "ip-address": "10.1.2.3"
- },
- {
- "prefix": 64,
- "ip-address": "fd8c:4687:e88d:1be3:5b70:7b88:c79c:293",
- "ip-address-type": "ipv6"
- }],
- "hardware-address": "ff:ff:ff:ff:ff:ff"
- },
- {
- "hardware-address": "ff:ff:ff:ff:ff:ff",
- "ip-addresses": [
- {
- "prefix": 16,
- "ip-address": "10.10.2.3",
- "ip-address-type": "ipv4"
- }],
- "name": "docker0",
- "statistics": {
- "rx-bytes": 0,
- "rx-errs": 0,
- "rx-dropped": 0,
- "rx-packets": 0,
- "tx-packets": 0,
- "tx-dropped": 0,
- "tx-errs": 0,
- "tx-bytes": 0
- }}]}
- elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/102/agent/network-get-interfaces":
- # _get_agent_network_interfaces
- return {"result": {'error': {'desc': 'this feature or command is not currently supported', 'class': 'Unsupported'}}}
- elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/103/agent/network-get-interfaces":
- # _get_agent_network_interfaces
- return {
- "result": [
- {
- "statistics": {
- "tx-errs": 0,
- "rx-errs": 0,
- "rx-dropped": 0,
- "tx-bytes": 48132932372,
- "tx-dropped": 0,
- "rx-bytes": 48132932372,
- "tx-packets": 178578980,
- "rx-packets": 178578980
- },
- "hardware-address": "ff:ff:ff:ff:ff:ff",
- "ip-addresses": [
- {
- "ip-address-type": "ipv4",
- "prefix": 8,
- "ip-address": "127.0.0.1"
- }
- ],
- "name": "lo"
- },
- {
- "name": "eth0",
- "ip-addresses": [
- {
- "ip-address-type": "ipv4",
- "prefix": 24,
- "ip-address": "172.16.0.143"
- }
- ],
- "statistics": {
- "rx-errs": 0,
- "tx-errs": 0,
- "rx-packets": 660028,
- "tx-packets": 304599,
- "tx-dropped": 0,
- "rx-bytes": 1846743499,
- "tx-bytes": 1287844926,
- "rx-dropped": 0
- },
- "hardware-address": "ff:ff:ff:ff:ff:ff"
- },
- {
- "name": "eth1",
- "hardware-address": "ff:ff:ff:ff:ff:ff",
- "statistics": {
- "rx-bytes": 235717091946,
- "tx-dropped": 0,
- "rx-dropped": 0,
- "tx-bytes": 123411636251,
- "rx-packets": 540431277,
- "tx-packets": 468411864,
- "rx-errs": 0,
- "tx-errs": 0
- },
- "ip-addresses": [
- {
- "ip-address": "10.0.0.133",
- "prefix": 24,
- "ip-address-type": "ipv4"
- }
- ]
- },
- {
- "name": "docker0",
- "ip-addresses": [
- {
- "ip-address": "172.17.0.1",
- "prefix": 16,
- "ip-address-type": "ipv4"
- }
- ],
- "hardware-address": "ff:ff:ff:ff:ff:ff",
- "statistics": {
- "rx-errs": 0,
- "tx-errs": 0,
- "rx-packets": 0,
- "tx-packets": 0,
- "tx-dropped": 0,
- "rx-bytes": 0,
- "rx-dropped": 0,
- "tx-bytes": 0
- }
- },
- {
- "hardware-address": "ff:ff:ff:ff:ff:ff",
- "name": "datapath"
- },
- {
- "name": "weave",
- "ip-addresses": [
- {
- "ip-address": "10.42.0.1",
- "ip-address-type": "ipv4",
- "prefix": 16
- }
- ],
- "hardware-address": "ff:ff:ff:ff:ff:ff",
- "statistics": {
- "rx-bytes": 127289123306,
- "tx-dropped": 0,
- "rx-dropped": 0,
- "tx-bytes": 43827573343,
- "rx-packets": 132750542,
- "tx-packets": 74218762,
- "rx-errs": 0,
- "tx-errs": 0
- }
- },
- {
- "name": "vethwe-datapath",
- "hardware-address": "ff:ff:ff:ff:ff:ff"
- },
- {
- "name": "vethwe-bridge",
- "hardware-address": "ff:ff:ff:ff:ff:ff"
- },
- {
- "hardware-address": "ff:ff:ff:ff:ff:ff",
- "name": "vxlan-6784"
- },
- {
- "name": "vethwepl0dfe1fe",
- "hardware-address": "ff:ff:ff:ff:ff:ff"
- },
- {
- "name": "vethweplf1e7715",
- "hardware-address": "ff:ff:ff:ff:ff:ff"
- },
- {
- "hardware-address": "ff:ff:ff:ff:ff:ff",
- "name": "vethwepl9d244a1"
- },
- {
- "hardware-address": "ff:ff:ff:ff:ff:ff",
- "name": "vethwepl2ca477b"
- },
- {
- "name": "nomacorip",
- }
- ]
- }
- elif url == "https://localhost:8006/api2/json/nodes/testnode/lxc/100/status/current":
- # _get_vm_status (lxc)
- return {
- "swap": 0,
- "name": "test-lxc",
- "diskread": 0,
- "vmid": 100,
- "diskwrite": 0,
- "pid": 9000,
- "mem": 89980928,
- "netin": 1950776396424,
- "disk": 4998168576,
- "cpu": 0.00163430613110039,
- "type": "lxc",
- "uptime": 6793736,
- "maxmem": 1073741824,
- "status": "running",
- "cpus": "1",
- "ha": {
- "group": 'null',
- "state": "started",
- "managed": 1
- },
- "maxdisk": 3348329267200,
- "netout": 1947793356037,
- "maxswap": 1073741824
- }
- elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/101/status/current":
- # _get_vm_status (qemu)
- return {
- "status": "stopped",
- "uptime": 0,
- "maxmem": 5364514816,
- "maxdisk": 34359738368,
- "netout": 0,
- "cpus": 2,
- "ha": {
- "managed": 0
- },
- "diskread": 0,
- "vmid": 101,
- "diskwrite": 0,
- "name": "test-qemu",
- "cpu": 0,
- "disk": 0,
- "netin": 0,
- "mem": 0,
- "qmpstatus": "stopped"
- }
- elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/102/status/current":
- # _get_vm_status (qemu)
- return {
- "status": "stopped",
- "uptime": 0,
- "maxmem": 5364514816,
- "maxdisk": 34359738368,
- "netout": 0,
- "cpus": 2,
- "ha": {
- "managed": 0
- },
- "diskread": 0,
- "vmid": 102,
- "diskwrite": 0,
- "name": "test-qemu-windows",
- "cpu": 0,
- "disk": 0,
- "netin": 0,
- "mem": 0,
- "qmpstatus": "prelaunch"
- }
- elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/103/status/current":
- # _get_vm_status (qemu)
- return {
- "status": "stopped",
- "uptime": 0,
- "maxmem": 5364514816,
- "maxdisk": 34359738368,
- "netout": 0,
- "cpus": 2,
- "ha": {
- "managed": 0
- },
- "diskread": 0,
- "vmid": 103,
- "diskwrite": 0,
- "name": "test-qemu-multi-nic",
- "cpu": 0,
- "disk": 0,
- "netin": 0,
- "mem": 0,
- "qmpstatus": "paused"
- }
-
-
-def get_vm_snapshots(node, properties, vmtype, vmid, name):
- return [
- {"description": "",
- "name": "clean",
- "snaptime": 1000,
- "vmstate": 0
- },
- {"name": "current",
- "digest": "1234689abcdf",
- "running": 0,
- "description": "You are here!",
- "parent": "clean"
- }]
-
-
-def get_option(opts):
- def fn(option):
- default = opts.get('default', False)
- return opts.get(option, default)
- return fn
-
-
-def test_populate(inventory, mocker):
- # module settings
- inventory.proxmox_user = 'root@pam'
- inventory.proxmox_password = 'password'
- inventory.proxmox_url = 'https://localhost:8006'
- inventory.group_prefix = 'proxmox_'
- inventory.facts_prefix = 'proxmox_'
- inventory.strict = False
- inventory.exclude_nodes = False
-
- opts = {
- 'group_prefix': 'proxmox_',
- 'facts_prefix': 'proxmox_',
- 'want_facts': True,
- 'want_proxmox_nodes_ansible_host': True,
- 'qemu_extended_statuses': True,
- 'exclude_nodes': False
- }
-
- # bypass authentication and API fetch calls
- inventory._get_auth = mocker.MagicMock(side_effect=get_auth)
- inventory._get_json = mocker.MagicMock(side_effect=get_json)
- inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots)
- inventory.get_option = mocker.MagicMock(side_effect=get_option(opts))
- inventory._can_add_host = mocker.MagicMock(return_value=True)
- inventory._populate()
-
- # get different hosts
- host_qemu = inventory.inventory.get_host('test-qemu')
- host_qemu_windows = inventory.inventory.get_host('test-qemu-windows')
- host_qemu_multi_nic = inventory.inventory.get_host('test-qemu-multi-nic')
- host_qemu_template = inventory.inventory.get_host('test-qemu-template')
- host_lxc = inventory.inventory.get_host('test-lxc')
-
- # check if qemu-test is in the proxmox_pool_test group
- assert 'proxmox_pool_test' in inventory.inventory.groups
- group_qemu = inventory.inventory.groups['proxmox_pool_test']
- assert group_qemu.hosts == [host_qemu]
-
- # check if qemu-test has eth0 interface in agent_interfaces fact
- assert 'eth0' in [d['name'] for d in host_qemu.get_vars()['proxmox_agent_interfaces']]
-
- # check if qemu-multi-nic has multiple network interfaces
- for iface_name in ['eth0', 'eth1', 'weave']:
- assert iface_name in [d['name'] for d in host_qemu_multi_nic.get_vars()['proxmox_agent_interfaces']]
-
- # check if interface with no mac-address or ip-address defaults correctly
- assert [iface for iface in host_qemu_multi_nic.get_vars()['proxmox_agent_interfaces']
- if iface['name'] == 'nomacorip'
- and iface['mac-address'] == ''
- and iface['ip-addresses'] == []
- ]
-
- # check to make sure qemu-windows doesn't have proxmox_agent_interfaces
- assert "proxmox_agent_interfaces" not in host_qemu_windows.get_vars()
-
- # check if lxc-test has been discovered correctly
- group_lxc = inventory.inventory.groups['proxmox_all_lxc']
- assert group_lxc.hosts == [host_lxc]
-
- # check if qemu template is not present
- assert host_qemu_template is None
-
- # check that offline node is in inventory
- assert inventory.inventory.get_host('testnode2')
-
- # make sure that ['prelaunch', 'paused'] are in the group list
- for group in ['paused', 'prelaunch']:
- assert ('%sall_%s' % (inventory.group_prefix, group)) in inventory.inventory.groups
-
- # check if qemu-windows is in the prelaunch group
- group_prelaunch = inventory.inventory.groups['proxmox_all_prelaunch']
- assert group_prelaunch.hosts == [host_qemu_windows]
-
- # check if qemu-multi-nic is in the paused group
- group_paused = inventory.inventory.groups['proxmox_all_paused']
- assert group_paused.hosts == [host_qemu_multi_nic]
-
-
-def test_populate_missing_qemu_extended_groups(inventory, mocker):
- # module settings
- inventory.proxmox_user = 'root@pam'
- inventory.proxmox_password = 'password'
- inventory.proxmox_url = 'https://localhost:8006'
- inventory.group_prefix = 'proxmox_'
- inventory.facts_prefix = 'proxmox_'
- inventory.strict = False
- inventory.exclude_nodes = False
-
- opts = {
- 'group_prefix': 'proxmox_',
- 'facts_prefix': 'proxmox_',
- 'want_facts': True,
- 'want_proxmox_nodes_ansible_host': True,
- 'qemu_extended_statuses': False,
- 'exclude_nodes': False
- }
-
- # bypass authentication and API fetch calls
- inventory._get_auth = mocker.MagicMock(side_effect=get_auth)
- inventory._get_json = mocker.MagicMock(side_effect=get_json)
- inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots)
- inventory.get_option = mocker.MagicMock(side_effect=get_option(opts))
- inventory._can_add_host = mocker.MagicMock(return_value=True)
- inventory._populate()
-
- # make sure that ['prelaunch', 'paused'] are not in the group list
- for group in ['paused', 'prelaunch']:
- assert ('%sall_%s' % (inventory.group_prefix, group)) not in inventory.inventory.groups
-
-
-def test_populate_exclude_nodes(inventory, mocker):
- # module settings
- inventory.proxmox_user = 'root@pam'
- inventory.proxmox_password = 'password'
- inventory.proxmox_url = 'https://localhost:8006'
- inventory.group_prefix = 'proxmox_'
- inventory.facts_prefix = 'proxmox_'
- inventory.strict = False
- inventory.exclude_nodes = True
-
- opts = {
- 'group_prefix': 'proxmox_',
- 'facts_prefix': 'proxmox_',
- 'want_facts': True,
- 'want_proxmox_nodes_ansible_host': True,
- 'qemu_extended_statuses': False,
- 'exclude_nodes': True
- }
-
- # bypass authentication and API fetch calls
- inventory._get_auth = mocker.MagicMock(side_effect=get_auth)
- inventory._get_json = mocker.MagicMock(side_effect=get_json)
- inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots)
- inventory.get_option = mocker.MagicMock(side_effect=get_option(opts))
- inventory._can_add_host = mocker.MagicMock(return_value=True)
- inventory._populate()
-
- # make sure that nodes are not in the inventory
- for node in ['testnode', 'testnode2']:
- assert node not in inventory.inventory.hosts
- # make sure that nodes group is absent
- assert ('%s_nodes' % (inventory.group_prefix)) not in inventory.inventory.groups
- # make sure that nodes are not in the "ungrouped" group
- for node in ['testnode', 'testnode2']:
- assert node not in inventory.inventory.get_groups_dict()["ungrouped"]
diff --git a/tests/unit/plugins/inventory/test_stackpath_compute.py b/tests/unit/plugins/inventory/test_stackpath_compute.py
deleted file mode 100644
index 781db50b73..0000000000
--- a/tests/unit/plugins/inventory/test_stackpath_compute.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Copyright (c) 2020 Shay Rybak
-# Copyright (c) 2020 Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import pytest
-
-from ansible.errors import AnsibleError
-from ansible.inventory.data import InventoryData
-from ansible_collections.community.general.plugins.inventory.stackpath_compute import InventoryModule
-
-
-@pytest.fixture(scope="module")
-def inventory():
- r = InventoryModule()
- r.inventory = InventoryData()
- return r
-
-
-def test_get_stack_slugs(inventory):
- stacks = [
- {
- 'status': 'ACTIVE',
- 'name': 'test1',
- 'id': 'XXXX',
- 'updatedAt': '2020-07-08T01:00:00.000000Z',
- 'slug': 'test1',
- 'createdAt': '2020-07-08T00:00:00.000000Z',
- 'accountId': 'XXXX',
- }, {
- 'status': 'ACTIVE',
- 'name': 'test2',
- 'id': 'XXXX',
- 'updatedAt': '2019-10-22T18:00:00.000000Z',
- 'slug': 'test2',
- 'createdAt': '2019-10-22T18:00:00.000000Z',
- 'accountId': 'XXXX',
- }, {
- 'status': 'DISABLED',
- 'name': 'test3',
- 'id': 'XXXX',
- 'updatedAt': '2020-01-16T20:00:00.000000Z',
- 'slug': 'test3',
- 'createdAt': '2019-10-15T13:00:00.000000Z',
- 'accountId': 'XXXX',
- }, {
- 'status': 'ACTIVE',
- 'name': 'test4',
- 'id': 'XXXX',
- 'updatedAt': '2019-11-20T22:00:00.000000Z',
- 'slug': 'test4',
- 'createdAt': '2019-11-20T22:00:00.000000Z',
- 'accountId': 'XXXX',
- }
- ]
- inventory._get_stack_slugs(stacks)
- assert len(inventory.stack_slugs) == 4
- assert inventory.stack_slugs == [
- "test1",
- "test2",
- "test3",
- "test4"
- ]
-
-
-def test_verify_file(tmp_path, inventory):
- file = tmp_path / "foobar.stackpath_compute.yml"
- file.touch()
- assert inventory.verify_file(str(file)) is True
-
-
-def test_verify_file_bad_config(inventory):
- assert inventory.verify_file('foobar.stackpath_compute.yml') is False
-
-
-def test_validate_config(inventory):
- config = {
- "client_secret": "short_client_secret",
- "use_internal_ip": False,
- "stack_slugs": ["test1"],
- "client_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
- "plugin": "community.general.stackpath_compute",
- }
- with pytest.raises(AnsibleError) as error_message:
- inventory._validate_config(config)
- assert "client_secret must be 64 characters long" in error_message
-
- config = {
- "client_secret": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
- "use_internal_ip": True,
- "stack_slugs": ["test1"],
- "client_id": "short_client_id",
- "plugin": "community.general.stackpath_compute",
- }
- with pytest.raises(AnsibleError) as error_message:
- inventory._validate_config(config)
- assert "client_id must be 32 characters long" in error_message
-
- config = {
- "use_internal_ip": True,
- "stack_slugs": ["test1"],
- "client_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
- "plugin": "community.general.stackpath_compute",
- }
- with pytest.raises(AnsibleError) as error_message:
- inventory._validate_config(config)
- assert "config missing client_secret, a required parameter" in error_message
-
- config = {
- "client_secret": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
- "use_internal_ip": False,
- "plugin": "community.general.stackpath_compute",
- }
- with pytest.raises(AnsibleError) as error_message:
- inventory._validate_config(config)
- assert "config missing client_id, a required parameter" in error_message
-
-
-def test_populate(inventory):
- instances = [
- {
- "name": "instance1",
- "countryCode": "SE",
- "workloadSlug": "wokrload1",
- "continent": "Europe",
- "workloadId": "id1",
- "cityCode": "ARN",
- "externalIpAddress": "20.0.0.1",
- "target": "target1",
- "stackSlug": "stack1",
- "ipAddress": "10.0.0.1",
- },
- {
- "name": "instance2",
- "countryCode": "US",
- "workloadSlug": "wokrload2",
- "continent": "America",
- "workloadId": "id2",
- "cityCode": "JFK",
- "externalIpAddress": "20.0.0.2",
- "target": "target2",
- "stackSlug": "stack1",
- "ipAddress": "10.0.0.2",
- },
- {
- "name": "instance3",
- "countryCode": "SE",
- "workloadSlug": "workload3",
- "continent": "Europe",
- "workloadId": "id3",
- "cityCode": "ARN",
- "externalIpAddress": "20.0.0.3",
- "target": "target1",
- "stackSlug": "stack2",
- "ipAddress": "10.0.0.3",
- },
- {
- "name": "instance4",
- "countryCode": "US",
- "workloadSlug": "workload3",
- "continent": "America",
- "workloadId": "id4",
- "cityCode": "JFK",
- "externalIpAddress": "20.0.0.4",
- "target": "target2",
- "stackSlug": "stack2",
- "ipAddress": "10.0.0.4",
- },
- ]
- inventory.hostname_key = "externalIpAddress"
- inventory._populate(instances)
- # get different hosts
- host1 = inventory.inventory.get_host('20.0.0.1')
- host2 = inventory.inventory.get_host('20.0.0.2')
- host3 = inventory.inventory.get_host('20.0.0.3')
- host4 = inventory.inventory.get_host('20.0.0.4')
-
- # get different groups
- assert 'citycode_arn' in inventory.inventory.groups
- group_citycode_arn = inventory.inventory.groups['citycode_arn']
- assert 'countrycode_se' in inventory.inventory.groups
- group_countrycode_se = inventory.inventory.groups['countrycode_se']
- assert 'continent_america' in inventory.inventory.groups
- group_continent_america = inventory.inventory.groups['continent_america']
- assert 'name_instance1' in inventory.inventory.groups
- group_name_instance1 = inventory.inventory.groups['name_instance1']
- assert 'stackslug_stack1' in inventory.inventory.groups
- group_stackslug_stack1 = inventory.inventory.groups['stackslug_stack1']
- assert 'target_target1' in inventory.inventory.groups
- group_target_target1 = inventory.inventory.groups['target_target1']
- assert 'workloadslug_workload3' in inventory.inventory.groups
- group_workloadslug_workload3 = inventory.inventory.groups['workloadslug_workload3']
- assert 'workloadid_id1' in inventory.inventory.groups
- group_workloadid_id1 = inventory.inventory.groups['workloadid_id1']
-
- assert group_citycode_arn.hosts == [host1, host3]
- assert group_countrycode_se.hosts == [host1, host3]
- assert group_continent_america.hosts == [host2, host4]
- assert group_name_instance1.hosts == [host1]
- assert group_stackslug_stack1.hosts == [host1, host2]
- assert group_target_target1.hosts == [host1, host3]
- assert group_workloadslug_workload3.hosts == [host3, host4]
- assert group_workloadid_id1.hosts == [host1]
diff --git a/tests/unit/plugins/lookup/test_dependent.py b/tests/unit/plugins/lookup/test_dependent.py
index ad02cecf19..fef53dec00 100644
--- a/tests/unit/plugins/lookup/test_dependent.py
+++ b/tests/unit/plugins/lookup/test_dependent.py
@@ -9,10 +9,9 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
+from ansible.template import Templar
+
from ansible_collections.community.internal_test_tools.tests.unit.compat.unittest import TestCase
-from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import (
- MagicMock,
-)
from ansible_collections.community.internal_test_tools.tests.unit.utils.trust import make_trusted
from ansible.plugins.loader import lookup_loader
@@ -20,8 +19,7 @@ from ansible.plugins.loader import lookup_loader
class TestLookupModule(TestCase):
def setUp(self):
- templar = MagicMock()
- templar._loader = None
+ templar = Templar(loader=None)
self.lookup = lookup_loader.get("community.general.dependent", templar=templar)
def test_empty(self):
diff --git a/tests/unit/plugins/lookup/test_manifold.py b/tests/unit/plugins/lookup/test_manifold.py
deleted file mode 100644
index e8e63c9227..0000000000
--- a/tests/unit/plugins/lookup/test_manifold.py
+++ /dev/null
@@ -1,537 +0,0 @@
-# Copyright (c) 2018, Arigato Machine Inc.
-# Copyright (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
-from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, call
-from ansible.errors import AnsibleError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
-from ansible.module_utils import six
-from ansible.plugins.loader import lookup_loader
-from ansible_collections.community.general.plugins.lookup.manifold import ManifoldApiClient, ApiError
-import json
-import os
-
-
-API_FIXTURES = {
- 'https://api.marketplace.manifold.co/v1/resources':
- [
- {
- "body": {
- "label": "resource-1",
- "name": "Resource 1"
- },
- "id": "rid-1"
- },
- {
- "body": {
- "label": "resource-2",
- "name": "Resource 2"
- },
- "id": "rid-2"
- }
- ],
- 'https://api.marketplace.manifold.co/v1/resources?label=resource-1':
- [
- {
- "body": {
- "label": "resource-1",
- "name": "Resource 1"
- },
- "id": "rid-1"
- }
- ],
- 'https://api.marketplace.manifold.co/v1/resources?label=resource-2':
- [
- {
- "body": {
- "label": "resource-2",
- "name": "Resource 2"
- },
- "id": "rid-2"
- }
- ],
- 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1':
- [
- {
- "body": {
- "label": "resource-1",
- "name": "Resource 1"
- },
- "id": "rid-1"
- }
- ],
- 'https://api.marketplace.manifold.co/v1/resources?project_id=pid-1':
- [
- {
- "body": {
- "label": "resource-2",
- "name": "Resource 2"
- },
- "id": "rid-2"
- }
- ],
- 'https://api.marketplace.manifold.co/v1/resources?project_id=pid-2':
- [
- {
- "body": {
- "label": "resource-1",
- "name": "Resource 1"
- },
- "id": "rid-1"
- },
- {
- "body": {
- "label": "resource-3",
- "name": "Resource 3"
- },
- "id": "rid-3"
- }
- ],
- 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1':
- [
- {
- "body": {
- "label": "resource-1",
- "name": "Resource 1"
- },
- "id": "rid-1"
- }
- ],
- 'https://api.marketplace.manifold.co/v1/projects':
- [
- {
- "body": {
- "label": "project-1",
- "name": "Project 1",
- },
- "id": "pid-1",
- },
- {
- "body": {
- "label": "project-2",
- "name": "Project 2",
- },
- "id": "pid-2",
- }
- ],
- 'https://api.marketplace.manifold.co/v1/projects?label=project-2':
- [
- {
- "body": {
- "label": "project-2",
- "name": "Project 2",
- },
- "id": "pid-2",
- }
- ],
- 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1':
- [
- {
- "body": {
- "resource_id": "rid-1",
- "values": {
- "RESOURCE_TOKEN_1": "token-1",
- "RESOURCE_TOKEN_2": "token-2"
- }
- },
- "id": "cid-1",
- }
- ],
- 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-2':
- [
- {
- "body": {
- "resource_id": "rid-2",
- "values": {
- "RESOURCE_TOKEN_3": "token-3",
- "RESOURCE_TOKEN_4": "token-4"
- }
- },
- "id": "cid-2",
- }
- ],
- 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-3':
- [
- {
- "body": {
- "resource_id": "rid-3",
- "values": {
- "RESOURCE_TOKEN_1": "token-5",
- "RESOURCE_TOKEN_2": "token-6"
- }
- },
- "id": "cid-3",
- }
- ],
- 'https://api.identity.manifold.co/v1/teams':
- [
- {
- "id": "tid-1",
- "body": {
- "name": "Team 1",
- "label": "team-1"
- }
- },
- {
- "id": "tid-2",
- "body": {
- "name": "Team 2",
- "label": "team-2"
- }
- }
- ]
-}
-
-
-def mock_fixture(open_url_mock, fixture=None, data=None, headers=None):
- if not headers:
- headers = {}
- if fixture:
- data = json.dumps(API_FIXTURES[fixture])
- if 'content-type' not in headers:
- headers['content-type'] = 'application/json'
-
- open_url_mock.return_value.read.return_value = data
- open_url_mock.return_value.headers = headers
-
-
-class TestManifoldApiClient(unittest.TestCase):
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_request_sends_default_headers(self, open_url_mock):
- mock_fixture(open_url_mock, data='hello')
- client = ManifoldApiClient('token-123')
- client.request('test', 'endpoint')
- open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
- headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
- http_agent='python-manifold-ansible-1.0.0')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_request_decodes_json(self, open_url_mock):
- mock_fixture(open_url_mock, fixture='https://api.marketplace.manifold.co/v1/resources')
- client = ManifoldApiClient('token-123')
- self.assertIsInstance(client.request('marketplace', 'resources'), list)
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_request_streams_text(self, open_url_mock):
- mock_fixture(open_url_mock, data='hello', headers={'content-type': "text/plain"})
- client = ManifoldApiClient('token-123')
- self.assertEqual('hello', client.request('test', 'endpoint'))
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_request_processes_parameterized_headers(self, open_url_mock):
- mock_fixture(open_url_mock, data='hello')
- client = ManifoldApiClient('token-123')
- client.request('test', 'endpoint', headers={'X-HEADER': 'MANIFOLD'})
- open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
- headers={'Accept': '*/*', 'Authorization': 'Bearer token-123',
- 'X-HEADER': 'MANIFOLD'},
- http_agent='python-manifold-ansible-1.0.0')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_request_passes_arbitrary_parameters(self, open_url_mock):
- mock_fixture(open_url_mock, data='hello')
- client = ManifoldApiClient('token-123')
- client.request('test', 'endpoint', use_proxy=False, timeout=5)
- open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
- headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
- http_agent='python-manifold-ansible-1.0.0',
- use_proxy=False, timeout=5)
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_request_raises_on_incorrect_json(self, open_url_mock):
- mock_fixture(open_url_mock, data='noJson', headers={'content-type': "application/json"})
- client = ManifoldApiClient('token-123')
- with self.assertRaises(ApiError) as context:
- client.request('test', 'endpoint')
- self.assertEqual('JSON response can\'t be parsed while requesting https://api.test.manifold.co/v1/endpoint:\n'
- 'noJson',
- str(context.exception))
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_request_raises_on_status_500(self, open_url_mock):
- open_url_mock.side_effect = HTTPError('https://api.test.manifold.co/v1/endpoint',
- 500, 'Server error', {}, six.StringIO('ERROR'))
- client = ManifoldApiClient('token-123')
- with self.assertRaises(ApiError) as context:
- client.request('test', 'endpoint')
- self.assertEqual('Server returned: HTTP Error 500: Server error while requesting '
- 'https://api.test.manifold.co/v1/endpoint:\nERROR',
- str(context.exception))
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_request_raises_on_bad_url(self, open_url_mock):
- open_url_mock.side_effect = URLError('URL is invalid')
- client = ManifoldApiClient('token-123')
- with self.assertRaises(ApiError) as context:
- client.request('test', 'endpoint')
- self.assertEqual('Failed lookup url for https://api.test.manifold.co/v1/endpoint : ',
- str(context.exception))
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_request_raises_on_ssl_error(self, open_url_mock):
- open_url_mock.side_effect = SSLValidationError('SSL Error')
- client = ManifoldApiClient('token-123')
- with self.assertRaises(ApiError) as context:
- client.request('test', 'endpoint')
- self.assertEqual('Error validating the server\'s certificate for https://api.test.manifold.co/v1/endpoint: '
- 'SSL Error',
- str(context.exception))
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_request_raises_on_connection_error(self, open_url_mock):
- open_url_mock.side_effect = ConnectionError('Unknown connection error')
- client = ManifoldApiClient('token-123')
- with self.assertRaises(ApiError) as context:
- client.request('test', 'endpoint')
- self.assertEqual('Error connecting to https://api.test.manifold.co/v1/endpoint: Unknown connection error',
- str(context.exception))
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_get_resources_get_all(self, open_url_mock):
- url = 'https://api.marketplace.manifold.co/v1/resources'
- mock_fixture(open_url_mock, fixture=url)
- client = ManifoldApiClient('token-123')
- self.assertListEqual(API_FIXTURES[url], client.get_resources())
- open_url_mock.assert_called_with(url,
- headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
- http_agent='python-manifold-ansible-1.0.0')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_get_resources_filter_label(self, open_url_mock):
- url = 'https://api.marketplace.manifold.co/v1/resources?label=resource-1'
- mock_fixture(open_url_mock, fixture=url)
- client = ManifoldApiClient('token-123')
- self.assertListEqual(API_FIXTURES[url], client.get_resources(label='resource-1'))
- open_url_mock.assert_called_with(url,
- headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
- http_agent='python-manifold-ansible-1.0.0')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_get_resources_filter_team_and_project(self, open_url_mock):
- url = 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1'
- mock_fixture(open_url_mock, fixture=url)
- client = ManifoldApiClient('token-123')
- self.assertListEqual(API_FIXTURES[url], client.get_resources(team_id='tid-1', project_id='pid-1'))
- args, kwargs = open_url_mock.call_args
- url_called = args[0]
- # Dict order is not guaranteed, so an url may have querystring parameters order randomized
- self.assertIn('team_id=tid-1', url_called)
- self.assertIn('project_id=pid-1', url_called)
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_get_teams_get_all(self, open_url_mock):
- url = 'https://api.identity.manifold.co/v1/teams'
- mock_fixture(open_url_mock, fixture=url)
- client = ManifoldApiClient('token-123')
- self.assertListEqual(API_FIXTURES[url], client.get_teams())
- open_url_mock.assert_called_with(url,
- headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
- http_agent='python-manifold-ansible-1.0.0')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_get_teams_filter_label(self, open_url_mock):
- url = 'https://api.identity.manifold.co/v1/teams'
- mock_fixture(open_url_mock, fixture=url)
- client = ManifoldApiClient('token-123')
- self.assertListEqual(API_FIXTURES[url][1:2], client.get_teams(label='team-2'))
- open_url_mock.assert_called_with(url,
- headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
- http_agent='python-manifold-ansible-1.0.0')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_get_projects_get_all(self, open_url_mock):
- url = 'https://api.marketplace.manifold.co/v1/projects'
- mock_fixture(open_url_mock, fixture=url)
- client = ManifoldApiClient('token-123')
- self.assertListEqual(API_FIXTURES[url], client.get_projects())
- open_url_mock.assert_called_with(url,
- headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
- http_agent='python-manifold-ansible-1.0.0')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_get_projects_filter_label(self, open_url_mock):
- url = 'https://api.marketplace.manifold.co/v1/projects?label=project-2'
- mock_fixture(open_url_mock, fixture=url)
- client = ManifoldApiClient('token-123')
- self.assertListEqual(API_FIXTURES[url], client.get_projects(label='project-2'))
- open_url_mock.assert_called_with(url,
- headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
- http_agent='python-manifold-ansible-1.0.0')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
- def test_get_credentials(self, open_url_mock):
- url = 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1'
- mock_fixture(open_url_mock, fixture=url)
- client = ManifoldApiClient('token-123')
- self.assertListEqual(API_FIXTURES[url], client.get_credentials(resource_id='rid-1'))
- open_url_mock.assert_called_with(url,
- headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
- http_agent='python-manifold-ansible-1.0.0')
-
-
-class TestLookupModule(unittest.TestCase):
- def setUp(self):
- self.lookup = lookup_loader.get('community.general.manifold')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_get_all(self, client_mock):
- expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
- 'RESOURCE_TOKEN_2': 'token-2',
- 'RESOURCE_TOKEN_3': 'token-3',
- 'RESOURCE_TOKEN_4': 'token-4'
- }]
- client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
- client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
- 'credentials?resource_id={0}'.format(x)]
- self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123'))
- client_mock.assert_called_with('token-123')
- client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None)
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_get_one_resource(self, client_mock):
- expected_result = [{'RESOURCE_TOKEN_3': 'token-3',
- 'RESOURCE_TOKEN_4': 'token-4'
- }]
- client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?label=resource-2']
- client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
- 'credentials?resource_id={0}'.format(x)]
- self.assertListEqual(expected_result, self.lookup.run(['resource-2'], api_token='token-123'))
- client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None, label='resource-2')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_get_two_resources(self, client_mock):
- expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
- 'RESOURCE_TOKEN_2': 'token-2',
- 'RESOURCE_TOKEN_3': 'token-3',
- 'RESOURCE_TOKEN_4': 'token-4'
- }]
- client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
- client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
- 'credentials?resource_id={0}'.format(x)]
- self.assertListEqual(expected_result, self.lookup.run(['resource-1', 'resource-2'], api_token='token-123'))
- client_mock.assert_called_with('token-123')
- client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None)
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.display')
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_get_resources_with_same_credential_names(self, client_mock, display_mock):
- expected_result = [{'RESOURCE_TOKEN_1': 'token-5',
- 'RESOURCE_TOKEN_2': 'token-6'
- }]
- client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-2']
- client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects?label=project-2']
- client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
- 'credentials?resource_id={0}'.format(x)]
- self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-2'))
- client_mock.assert_called_with('token-123')
- display_mock.warning.assert_has_calls([
- call("'RESOURCE_TOKEN_1' with label 'resource-1' was replaced by resource data with label 'resource-3'"),
- call("'RESOURCE_TOKEN_2' with label 'resource-1' was replaced by resource data with label 'resource-3'")],
- any_order=True
- )
- client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-2')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_filter_by_team(self, client_mock):
- expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
- 'RESOURCE_TOKEN_2': 'token-2'
- }]
- client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1']
- client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1]
- client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
- 'credentials?resource_id={0}'.format(x)]
- self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', team='team-1'))
- client_mock.assert_called_with('token-123')
- client_mock.return_value.get_resources.assert_called_with(team_id='tid-1', project_id=None)
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_filter_by_project(self, client_mock):
- expected_result = [{'RESOURCE_TOKEN_3': 'token-3',
- 'RESOURCE_TOKEN_4': 'token-4'
- }]
- client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-1']
- client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1]
- client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
- 'credentials?resource_id={0}'.format(x)]
- self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-1'))
- client_mock.assert_called_with('token-123')
- client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-1')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_filter_by_team_and_project(self, client_mock):
- expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
- 'RESOURCE_TOKEN_2': 'token-2'
- }]
- client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1']
- client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1]
- client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1]
- client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
- 'credentials?resource_id={0}'.format(x)]
- self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-1'))
- client_mock.assert_called_with('token-123')
- client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-1')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_raise_team_doesnt_exist(self, client_mock):
- client_mock.return_value.get_teams.return_value = []
- with self.assertRaises(AnsibleError) as context:
- self.lookup.run([], api_token='token-123', team='no-team')
- self.assertEqual("Team 'no-team' does not exist",
- str(context.exception))
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_raise_project_doesnt_exist(self, client_mock):
- client_mock.return_value.get_projects.return_value = []
- with self.assertRaises(AnsibleError) as context:
- self.lookup.run([], api_token='token-123', project='no-project')
- self.assertEqual("Project 'no-project' does not exist",
- str(context.exception))
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_raise_resource_doesnt_exist(self, client_mock):
- client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
- with self.assertRaises(AnsibleError) as context:
- self.lookup.run(['resource-1', 'no-resource-1', 'no-resource-2'], api_token='token-123')
- self.assertEqual("Resource(s) no-resource-1, no-resource-2 do not exist",
- str(context.exception))
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_catch_api_error(self, client_mock):
- client_mock.side_effect = ApiError('Generic error')
- with self.assertRaises(AnsibleError) as context:
- self.lookup.run([], api_token='token-123')
- self.assertEqual("API Error: Generic error",
- str(context.exception))
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_catch_unhandled_exception(self, client_mock):
- client_mock.side_effect = Exception('Unknown error')
- with self.assertRaises(AnsibleError) as context:
- self.lookup.run([], api_token='token-123')
- self.assertTrue('Exception: Unknown error' in str(context.exception))
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_falls_back_to_env_var(self, client_mock):
- client_mock.return_value.get_resources.return_value = []
- client_mock.return_value.get_credentials.return_value = []
- try:
- os.environ['MANIFOLD_API_TOKEN'] = 'token-321'
- self.lookup.run([])
- finally:
- os.environ.pop('MANIFOLD_API_TOKEN', None)
- client_mock.assert_called_with('token-321')
-
- @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
- def test_falls_raises_on_no_token(self, client_mock):
- client_mock.return_value.get_resources.return_value = []
- client_mock.return_value.get_credentials.return_value = []
- os.environ.pop('MANIFOLD_API_TOKEN', None)
- with self.assertRaises(AnsibleError) as context:
- self.lookup.run([])
- assert 'api_token' in str(context.exception)
diff --git a/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py b/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py
index 6ddc827a14..a176a55768 100644
--- a/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py
+++ b/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py
@@ -13,7 +13,7 @@ from ansible.module_utils.six import iteritems
from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl import (
api,
)
-from mock import MagicMock
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import MagicMock
__metaclass__ = type
diff --git a/tests/unit/plugins/module_utils/test_module_helper.py b/tests/unit/plugins/module_utils/test_module_helper.py
index cbcdaae788..a135a20d0a 100644
--- a/tests/unit/plugins/module_utils/test_module_helper.py
+++ b/tests/unit/plugins/module_utils/test_module_helper.py
@@ -10,123 +10,13 @@ __metaclass__ = type
import pytest
from ansible_collections.community.general.plugins.module_utils.module_helper import (
- DependencyCtxMgr, VarMeta, VarDict, cause_changes
+ cause_changes
)
-# remove in 11.0.0
-def test_dependency_ctxmgr():
- ctx = DependencyCtxMgr("POTATOES", "Potatoes must be installed")
- with ctx:
- import potatoes_that_will_never_be_there # noqa: F401, pylint: disable=unused-import
- print("POTATOES: ctx.text={0}".format(ctx.text))
- assert ctx.text == "Potatoes must be installed"
- assert not ctx.has_it
-
- ctx = DependencyCtxMgr("POTATOES2")
- with ctx:
- import potatoes_that_will_never_be_there_again # noqa: F401, pylint: disable=unused-import
- assert not ctx.has_it
- print("POTATOES2: ctx.text={0}".format(ctx.text))
- assert ctx.text.startswith("No module named")
- assert "potatoes_that_will_never_be_there_again" in ctx.text
-
- ctx = DependencyCtxMgr("TYPING")
- with ctx:
- import sys # noqa: F401, pylint: disable=unused-import
- assert ctx.has_it
-
-
-# remove in 11.0.0
-def test_variable_meta():
- meta = VarMeta()
- assert meta.output is True
- assert meta.diff is False
- assert meta.value is None
- meta.set_value("abc")
- assert meta.initial_value == "abc"
- assert meta.value == "abc"
- assert meta.diff_result is None
- meta.set_value("def")
- assert meta.initial_value == "abc"
- assert meta.value == "def"
- assert meta.diff_result is None
-
-
-# remove in 11.0.0
-def test_variable_meta_diff():
- meta = VarMeta(diff=True)
- assert meta.output is True
- assert meta.diff is True
- assert meta.value is None
- meta.set_value("abc")
- assert meta.initial_value == "abc"
- assert meta.value == "abc"
- assert meta.diff_result is None
- meta.set_value("def")
- assert meta.initial_value == "abc"
- assert meta.value == "def"
- assert meta.diff_result == {"before": "abc", "after": "def"}
- meta.set_value("ghi")
- assert meta.initial_value == "abc"
- assert meta.value == "ghi"
- assert meta.diff_result == {"before": "abc", "after": "ghi"}
-
-
-# remove in 11.0.0
-def test_vardict():
- vd = VarDict()
- vd.set('a', 123)
- assert vd['a'] == 123
- assert vd.a == 123
- assert 'a' in vd._meta
- assert vd.meta('a').output is True
- assert vd.meta('a').diff is False
- assert vd.meta('a').change is False
- vd['b'] = 456
- assert vd.meta('b').output is True
- assert vd.meta('b').diff is False
- assert vd.meta('b').change is False
- vd.set_meta('a', diff=True, change=True)
- vd.set_meta('b', diff=True, output=False)
- vd['c'] = 789
- assert vd.has_changed('c') is False
- vd['a'] = 'new_a'
- assert vd.has_changed('a') is True
- vd['c'] = 'new_c'
- assert vd.has_changed('c') is False
- vd['b'] = 'new_b'
- assert vd.has_changed('b') is False
- assert vd.a == 'new_a'
- assert vd.c == 'new_c'
- assert vd.output() == {'a': 'new_a', 'c': 'new_c'}
- assert vd.diff() == {'before': {'a': 123}, 'after': {'a': 'new_a'}}, "diff={0}".format(vd.diff())
-
-
-# remove in 11.0.0
-def test_variable_meta_change():
- vd = VarDict()
- vd.set('a', 123, change=True)
- vd.set('b', [4, 5, 6], change=True)
- vd.set('c', {'m': 7, 'n': 8, 'o': 9}, change=True)
- vd.set('d', {'a1': {'a11': 33, 'a12': 34}}, change=True)
-
- vd.a = 1234
- assert vd.has_changed('a') is True
- vd.b.append(7)
- assert vd.b == [4, 5, 6, 7]
- assert vd.has_changed('b')
- vd.c.update({'p': 10})
- assert vd.c == {'m': 7, 'n': 8, 'o': 9, 'p': 10}
- assert vd.has_changed('c')
- vd.d['a1'].update({'a13': 35})
- assert vd.d == {'a1': {'a11': 33, 'a12': 34, 'a13': 35}}
- assert vd.has_changed('d')
-
-
#
# DEPRECATION NOTICE
-# Parameters on_success and on_failure are deprecated and will be removed in community.genral 12.0.0
+# Parameters on_success and on_failure are deprecated and will be removed in community.general 12.0.0
# Remove testcases with those params when releasing 12.0.0
#
CAUSE_CHG_DECO_PARAMS = ['deco_args', 'expect_exception', 'expect_changed']
diff --git a/tests/unit/plugins/module_utils/xenserver/conftest.py b/tests/unit/plugins/module_utils/xenserver/conftest.py
index 3fcea55617..6190bb1ea0 100644
--- a/tests/unit/plugins/module_utils/xenserver/conftest.py
+++ b/tests/unit/plugins/module_utils/xenserver/conftest.py
@@ -16,7 +16,7 @@ import pytest
from .FakeAnsibleModule import FakeAnsibleModule
from ansible.module_utils import six
-from mock import MagicMock
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import MagicMock
@pytest.fixture
diff --git a/tests/unit/plugins/modules/gitlab.py b/tests/unit/plugins/modules/gitlab.py
index d388a5c66b..a66ecf856f 100644
--- a/tests/unit/plugins/modules/gitlab.py
+++ b/tests/unit/plugins/modules/gitlab.py
@@ -287,11 +287,36 @@ def resp_delete_group(url, request):
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/access_tokens", method="get")
def resp_list_group_access_tokens(url, request):
headers = {'content-type': 'application/json'}
- content = ('[{"user_id" : 1, "scopes" : ["api"], "name" : "token1", "expires_at" : "2021-01-31",'
- '"id" : 1, "active" : false, "created_at" : "2021-01-20T22:11:48.151Z", "revoked" : true,'
- '"access_level": 40},{"user_id" : 2, "scopes" : ["api"], "name" : "token2", "expires_at" : "2021-02-31",'
- '"id" : 2, "active" : true, "created_at" : "2021-02-20T22:11:48.151Z", "revoked" : false,'
- '"access_level": 40}]')
+ content = (
+ '[{"id":689,"name":"test-token","revoked":true,"created_at":"2025-06-02T09:18:01.484Z",'
+ '"description":null,"scopes":["read_repository","write_repository"],"user_id":1779,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,'
+ '"resource_type":"group","resource_id":1730},'
+ '{"id":690,"name":"test-token","revoked":true,"created_at":"2025-06-02T09:36:30.650Z",'
+ '"description":null,"scopes":["read_repository","write_repository"],"user_id":1780,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,'
+ '"resource_type":"group","resource_id":1730},'
+ '{"id":691,"name":"test-token","revoked":false,"created_at":"2025-06-02T09:39:18.252Z",'
+ '"description":null,"scopes":["read_repository","write_repository"],"user_id":1781,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,'
+ '"resource_type":"group","resource_id":1730},'
+ '{"id":695,"name":"test-token-no-revoked","created_at":"2025-06-02T09:39:18.252Z",'
+ '"description":null,"scopes":["read_repository","write_repository"],"user_id":1781,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,'
+ '"resource_type":"group","resource_id":1730},'
+ '{"id":692,"name":"test-token-two","revoked":true,"created_at":"2025-06-02T09:41:18.442Z",'
+ '"description":null,"scopes":["read_repository","write_repository"],"user_id":1782,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,'
+ '"resource_type":"group","resource_id":1730},'
+ '{"id":693,"name":"test-token-three","revoked":true,"created_at":"2025-06-02T09:50:00.976Z"'
+ ',"description":null,"scopes":["read_repository","write_repository"],"user_id":1783,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-06-04","access_level":40,'
+ '"resource_type":"group","resource_id":1730},'
+ '{"id":694,"name":"test-token-three","revoked":true,"created_at":"2025-06-02T09:56:45.779Z"'
+ ',"description":null,"scopes":["read_repository","write_repository"],"user_id":1784,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-06-04","access_level":40,'
+ '"resource_type":"group","resource_id":1730}]'
+ )
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@@ -306,7 +331,7 @@ def resp_create_group_access_tokens(url, request):
return response(201, content, headers, None, 5, request)
-@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/access_tokens/1", method="delete")
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/access_tokens/[0-9]+", method="delete")
def resp_revoke_group_access_tokens(url, request):
headers = {'content-type': 'application/json'}
content = ('')
@@ -567,11 +592,36 @@ def resp_delete_protected_branch(url, request):
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/access_tokens", method="get")
def resp_list_project_access_tokens(url, request):
headers = {'content-type': 'application/json'}
- content = ('[{"user_id" : 1, "scopes" : ["api"], "name" : "token1", "expires_at" : "2021-01-31",'
- '"id" : 1, "active" : false, "created_at" : "2021-01-20T22:11:48.151Z", "revoked" : true,'
- '"access_level": 40},{"user_id" : 2, "scopes" : ["api"], "name" : "token2", "expires_at" : "2021-02-31",'
- '"id" : 2, "active" : true, "created_at" : "2021-02-20T22:11:48.151Z", "revoked" : false,'
- '"access_level": 40}]')
+ content = (
+ '[{"id":689,"name":"test-token","revoked":true,"created_at":"2025-06-02T09:18:01.484Z",'
+ '"description":null,"scopes":["read_repository","write_repository"],"user_id":1779,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,'
+ '"resource_type":"project","resource_id":1730},'
+ '{"id":690,"name":"test-token","revoked":true,"created_at":"2025-06-02T09:36:30.650Z",'
+ '"description":null,"scopes":["read_repository","write_repository"],"user_id":1780,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,'
+ '"resource_type":"project","resource_id":1730},'
+ '{"id":691,"name":"test-token","revoked":false,"created_at":"2025-06-02T09:39:18.252Z",'
+ '"description":null,"scopes":["read_repository","write_repository"],"user_id":1781,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,'
+ '"resource_type":"project","resource_id":1730},'
+ '{"id":695,"name":"test-token-no-revoked","created_at":"2025-06-02T09:39:18.252Z",'
+ '"description":null,"scopes":["read_repository","write_repository"],"user_id":1781,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,'
+        '"resource_type":"project","resource_id":1730},'
+ '{"id":692,"name":"test-token-two","revoked":true,"created_at":"2025-06-02T09:41:18.442Z",'
+ '"description":null,"scopes":["read_repository","write_repository"],"user_id":1782,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-07-02","access_level":40,'
+ '"resource_type":"project","resource_id":1730},'
+ '{"id":693,"name":"test-token-three","revoked":true,"created_at":"2025-06-02T09:50:00.976Z"'
+ ',"description":null,"scopes":["read_repository","write_repository"],"user_id":1783,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-06-04","access_level":40,'
+ '"resource_type":"project","resource_id":1730},'
+ '{"id":694,"name":"test-token-three","revoked":true,"created_at":"2025-06-02T09:56:45.779Z"'
+ ',"description":null,"scopes":["read_repository","write_repository"],"user_id":1784,'
+ '"last_used_at":null,"active":false,"expires_at":"2025-06-04","access_level":40,'
+ '"resource_type":"project","resource_id":1730}]'
+ )
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@@ -586,7 +636,7 @@ def resp_create_project_access_tokens(url, request):
return response(201, content, headers, None, 5, request)
-@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/access_tokens/1", method="delete")
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/access_tokens/[0-9]+", method="delete")
def resp_revoke_project_access_tokens(url, request):
headers = {'content-type': 'application/json'}
content = ('')
diff --git a/tests/unit/plugins/modules/test_github_repo.py b/tests/unit/plugins/modules/test_github_repo.py
index bbb1f624f4..9a76fc4b69 100644
--- a/tests/unit/plugins/modules/test_github_repo.py
+++ b/tests/unit/plugins/modules/test_github_repo.py
@@ -7,12 +7,13 @@ __metaclass__ = type
import re
import json
-import sys
+import pytest
from httmock import with_httmock, urlmatch, response
from ansible_collections.community.internal_test_tools.tests.unit.compat import unittest
from ansible_collections.community.general.plugins.modules import github_repo
-GITHUB_MINIMUM_PYTHON_VERSION = (2, 7)
+
+pytest.importorskip('github')
@urlmatch(netloc=r'.*')
@@ -167,11 +168,6 @@ def delete_repo_notfound_mock(url, request):
class TestGithubRepo(unittest.TestCase):
- def setUp(self):
- if sys.version_info < GITHUB_MINIMUM_PYTHON_VERSION:
- self.skipTest("Python %s+ is needed for PyGithub" %
- ",".join(map(str, GITHUB_MINIMUM_PYTHON_VERSION)))
-
@with_httmock(get_orgs_mock)
@with_httmock(get_repo_notfound_mock)
@with_httmock(create_new_org_repo_mock)
diff --git a/tests/unit/plugins/modules/test_gitlab_group_access_token.py b/tests/unit/plugins/modules/test_gitlab_group_access_token.py
index 06af948204..cc7644060f 100644
--- a/tests/unit/plugins/modules/test_gitlab_group_access_token.py
+++ b/tests/unit/plugins/modules/test_gitlab_group_access_token.py
@@ -68,9 +68,33 @@ class TestGitlabGroupAccessToken(GitlabModuleTestCase):
group = self.gitlab_instance.groups.get(1)
self.assertIsNotNone(group)
- rvalue = self.moduleUtil.find_access_token(group, "token1")
+ rvalue = self.moduleUtil.find_access_token(group, "test-token")
self.assertEqual(rvalue, False)
self.assertIsNotNone(self.moduleUtil.access_token_object)
+ self.assertEqual(self.moduleUtil.access_token_object.id, 691)
+ self.assertFalse(self.moduleUtil.access_token_object.revoked)
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_list_group_access_tokens)
+ def test_find_access_token_old_format(self):
+ group = self.gitlab_instance.groups.get(1)
+ self.assertIsNotNone(group)
+
+ rvalue = self.moduleUtil.find_access_token(group, "test-token-no-revoked")
+ self.assertEqual(rvalue, False)
+ self.assertIsNotNone(self.moduleUtil.access_token_object)
+ self.assertEqual(self.moduleUtil.access_token_object.id, 695)
+ self.assertFalse(hasattr(self.moduleUtil.access_token_object, "revoked"))
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_list_group_access_tokens)
+ def test_find_revoked_access_token(self):
+ group = self.gitlab_instance.groups.get(1)
+ self.assertIsNotNone(group)
+
+ rvalue = self.moduleUtil.find_access_token(group, "test-token-three")
+ self.assertEqual(rvalue, False)
+ self.assertIsNone(self.moduleUtil.access_token_object)
@with_httmock(resp_get_group)
@with_httmock(resp_list_group_access_tokens)
@@ -99,7 +123,7 @@ class TestGitlabGroupAccessToken(GitlabModuleTestCase):
groups = self.gitlab_instance.groups.get(1)
self.assertIsNotNone(groups)
- rvalue = self.moduleUtil.find_access_token(groups, "token1")
+ rvalue = self.moduleUtil.find_access_token(groups, "test-token")
self.assertEqual(rvalue, False)
self.assertIsNotNone(self.moduleUtil.access_token_object)
diff --git a/tests/unit/plugins/modules/test_gitlab_project_access_token.py b/tests/unit/plugins/modules/test_gitlab_project_access_token.py
index ebc324b889..050c2435fa 100644
--- a/tests/unit/plugins/modules/test_gitlab_project_access_token.py
+++ b/tests/unit/plugins/modules/test_gitlab_project_access_token.py
@@ -68,9 +68,33 @@ class TestGitlabProjectAccessToken(GitlabModuleTestCase):
project = self.gitlab_instance.projects.get(1)
self.assertIsNotNone(project)
- rvalue = self.moduleUtil.find_access_token(project, "token1")
+ rvalue = self.moduleUtil.find_access_token(project, "test-token")
self.assertEqual(rvalue, False)
self.assertIsNotNone(self.moduleUtil.access_token_object)
+ self.assertEqual(self.moduleUtil.access_token_object.id, 691)
+ self.assertFalse(self.moduleUtil.access_token_object.revoked)
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_list_project_access_tokens)
+ def test_find_access_token_old_format(self):
+ project = self.gitlab_instance.projects.get(1)
+ self.assertIsNotNone(project)
+
+ rvalue = self.moduleUtil.find_access_token(project, "test-token-no-revoked")
+ self.assertEqual(rvalue, False)
+ self.assertIsNotNone(self.moduleUtil.access_token_object)
+ self.assertEqual(self.moduleUtil.access_token_object.id, 695)
+ self.assertFalse(hasattr(self.moduleUtil.access_token_object, "revoked"))
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_list_project_access_tokens)
+ def test_find_revoked_access_token(self):
+ project = self.gitlab_instance.projects.get(1)
+ self.assertIsNotNone(project)
+
+ rvalue = self.moduleUtil.find_access_token(project, "test-token-three")
+ self.assertEqual(rvalue, False)
+ self.assertIsNone(self.moduleUtil.access_token_object)
@with_httmock(resp_get_project)
@with_httmock(resp_list_project_access_tokens)
@@ -99,7 +123,7 @@ class TestGitlabProjectAccessToken(GitlabModuleTestCase):
project = self.gitlab_instance.projects.get(1)
self.assertIsNotNone(project)
- rvalue = self.moduleUtil.find_access_token(project, "token1")
+ rvalue = self.moduleUtil.find_access_token(project, "test-token")
self.assertEqual(rvalue, False)
self.assertIsNotNone(self.moduleUtil.access_token_object)
diff --git a/tests/unit/plugins/modules/test_jenkins_credential.py b/tests/unit/plugins/modules/test_jenkins_credential.py
new file mode 100644
index 0000000000..b74b7c4b59
--- /dev/null
+++ b/tests/unit/plugins/modules/test_jenkins_credential.py
@@ -0,0 +1,348 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.modules import jenkins_credential
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import (
+ MagicMock,
+ patch,
+ mock_open,
+)
+
+import json
+import sys
+
+if sys.version_info[0] == 3:
+ import builtins
+ open_path = "builtins.open"
+else:
+ import __builtin__ as builtins
+ open_path = "__builtin__.open"
+
+
+def test_validate_file_exist_passes_when_file_exists():
+ module = MagicMock()
+ with patch("os.path.exists", return_value=True):
+ jenkins_credential.validate_file_exist(module, "/some/file/path")
+ module.fail_json.assert_not_called()
+
+
+def test_validate_file_exist_fails_when_file_missing():
+ module = MagicMock()
+ with patch("os.path.exists", return_value=False):
+ jenkins_credential.validate_file_exist(module, "/missing/file/path")
+ module.fail_json.assert_called_once_with(
+ msg="File not found: /missing/file/path"
+ )
+
+
+@patch(
+ "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url"
+)
+def test_get_jenkins_crumb_sets_crumb_header(fetch_mock):
+ module = MagicMock()
+ module.params = {"type": "file", "url": "http://localhost:8080"}
+ headers = {}
+
+ fake_response = MagicMock()
+ fake_response.read.return_value = json.dumps(
+ {"crumbRequestField": "crumb_field", "crumb": "abc123"}
+ ).encode("utf-8")
+
+ fetch_mock.return_value = (
+ fake_response,
+ {"status": 200, "set-cookie": "JSESSIONID=something; Path=/"},
+ )
+
+    crumb_request_field, crumb, session_cookie = jenkins_credential.get_jenkins_crumb(
+ module, headers
+ )
+
+ assert "Cookie" not in headers
+ assert "crumb_field" in headers
+ assert crumb == "abc123"
+ assert headers[crumb_request_field] == crumb
+
+
+@patch(
+ "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url"
+)
+def test_get_jenkins_crumb_sets_cookie_if_type_token(fetch_mock):
+ module = MagicMock()
+ module.params = {"type": "token", "url": "http://localhost:8080"}
+ headers = {}
+
+ fake_response = MagicMock()
+ fake_response.read.return_value = json.dumps(
+ {"crumbRequestField": "crumb_field", "crumb": "secure"}
+ ).encode("utf-8")
+
+ fetch_mock.return_value = (
+ fake_response,
+ {"status": 200, "set-cookie": "JSESSIONID=token-cookie; Path=/"},
+ )
+
+ crumb_request_field, crumb, session_cookie = jenkins_credential.get_jenkins_crumb(
+ module, headers
+ )
+
+ assert "crumb_field" in headers
+ assert crumb == "secure"
+ assert headers[crumb_request_field] == crumb
+ assert headers["Cookie"] == session_cookie
+
+
+@patch(
+ "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url"
+)
+def test_get_jenkins_crumb_fails_on_non_200_status(fetch_mock):
+ module = MagicMock()
+ module.params = {"type": "file", "url": "http://localhost:8080"}
+ headers = {}
+
+ fetch_mock.return_value = (MagicMock(), {"status": 403})
+
+ jenkins_credential.get_jenkins_crumb(module, headers)
+
+ module.fail_json.assert_called_once()
+ assert "Failed to fetch Jenkins crumb" in module.fail_json.call_args[1]["msg"]
+
+
+@patch(
+ "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url"
+)
+def test_get_jenkins_crumb_removes_job_from_url(fetch_mock):
+ module = MagicMock()
+ module.params = {"type": "file", "url": "http://localhost:8080/job/test"}
+ headers = {}
+
+ fake_response = MagicMock()
+ fake_response.read.return_value = json.dumps(
+ {"crumbRequestField": "Jenkins-Crumb", "crumb": "xyz"}
+ ).encode("utf-8")
+
+ fetch_mock.return_value = (fake_response, {"status": 200, "set-cookie": ""})
+
+ jenkins_credential.get_jenkins_crumb(module, headers)
+
+ url_called = fetch_mock.call_args[0][1]
+ assert url_called == "http://localhost:8080/crumbIssuer/api/json"
+
+
+def test_clean_data_removes_extraneous_fields():
+ data = {
+ "id": "cred1",
+ "description": "test",
+ "jenkins_user": "admin",
+ "token": "secret",
+ "url": "http://localhost:8080",
+ "file_path": None,
+ }
+ expected = {"id": "cred1", "description": "test"}
+ result = jenkins_credential.clean_data(data)
+ assert result == expected, "Expected {}, got {}".format(expected, result)
+
+
+@patch(
+ "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url"
+)
+def test_target_exists_returns_true_on_200(fetch_url_mock):
+ module = MagicMock()
+ module.params = {
+ "url": "http://localhost:8080",
+ "location": "system",
+ "scope": "_",
+ "id": "my-id",
+ "jenkins_user": "admin",
+ "token": "secret",
+ "type": "file",
+ }
+
+ fetch_url_mock.return_value = (MagicMock(), {"status": 200})
+ assert jenkins_credential.target_exists(module) is True
+
+
+@patch(
+ "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url"
+)
+def test_target_exists_returns_false_on_404(fetch_url_mock):
+ module = MagicMock()
+ module.params = {
+ "url": "http://localhost:8080",
+ "location": "system",
+ "scope": "_",
+ "id": "my-id",
+ "jenkins_user": "admin",
+ "token": "secret",
+ "type": "file",
+ }
+
+ fetch_url_mock.return_value = (MagicMock(), {"status": 404})
+ assert jenkins_credential.target_exists(module) is False
+
+
+@patch(
+ "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url"
+)
+def test_target_exists_calls_fail_json_on_unexpected_status(fetch_url_mock):
+ module = MagicMock()
+ module.params = {
+ "url": "http://localhost:8080",
+ "location": "system",
+ "scope": "_",
+ "id": "my-id",
+ "jenkins_user": "admin",
+ "token": "secret",
+ "type": "file",
+ }
+
+ fetch_url_mock.return_value = (MagicMock(), {"status": 500})
+ jenkins_credential.target_exists(module)
+ module.fail_json.assert_called_once()
+ assert "Unexpected status code" in module.fail_json.call_args[1]["msg"]
+
+
+@patch(
+ "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url"
+)
+def test_target_exists_skips_check_for_token_type(fetch_url_mock):
+ module = MagicMock()
+ module.params = {
+ "type": "token",
+ "url": "ignored",
+ "location": "ignored",
+ "scope": "ignored",
+ "id": "ignored",
+ "jenkins_user": "ignored",
+ "token": "ignored",
+ }
+
+ assert jenkins_credential.target_exists(module) is False
+ fetch_url_mock.assert_not_called()
+
+
+@patch(
+ "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url"
+)
+def test_delete_target_fails_deleting(fetch_mock):
+ module = MagicMock()
+ module.params = {
+ "type": "token",
+ "jenkins_user": "admin",
+ "url": "http://localhost:8080",
+ "id": "token-id",
+ "location": "system",
+ "scope": "_",
+ }
+ headers = {"Authorization": "Basic abc", "Content-Type": "whatever"}
+
+ fetch_mock.return_value = (MagicMock(), {"status": 500})
+
+ jenkins_credential.delete_target(module, headers)
+
+ module.fail_json.assert_called_once()
+ assert "Failed to delete" in module.fail_json.call_args[1]["msg"]
+
+
+@patch(
+ "ansible_collections.community.general.plugins.modules.jenkins_credential.fetch_url",
+ side_effect=Exception("network error"),
+)
+def test_delete_target_raises_exception(fetch_mock):
+ module = MagicMock()
+ module.params = {
+ "type": "scope",
+ "jenkins_user": "admin",
+ "location": "system",
+ "url": "http://localhost:8080",
+ "id": "domain-id",
+ "scope": "_",
+ }
+ headers = {"Authorization": "Basic auth"}
+
+ jenkins_credential.delete_target(module, headers)
+
+ module.fail_json.assert_called_once()
+ assert "Exception during delete" in module.fail_json.call_args[1]["msg"]
+ assert "network error" in module.fail_json.call_args[1]["msg"]
+
+
+def test_read_privateKey_returns_trimmed_contents():
+ module = MagicMock()
+ module.params = {"private_key_path": "/fake/path/key.pem"}
+
+ mocked_file = mock_open(
+ read_data="\n \t -----BEGIN PRIVATE KEY-----\nKEYDATA\n-----END PRIVATE KEY----- \n\n"
+ )
+ with patch(open_path, mocked_file):
+ result = jenkins_credential.read_privateKey(module)
+
+ expected = "-----BEGIN PRIVATE KEY-----\nKEYDATA\n-----END PRIVATE KEY-----"
+
+ assert result == expected
+ mocked_file.assert_called_once_with("/fake/path/key.pem", "r")
+
+
+def test_read_privateKey_handles_file_read_error():
+ module = MagicMock()
+ module.params = {"private_key_path": "/invalid/path.pem"}
+
+ with patch(open_path, side_effect=IOError("cannot read file")):
+ jenkins_credential.read_privateKey(module)
+
+ module.fail_json.assert_called_once()
+ assert "Failed to read private key file" in module.fail_json.call_args[1]["msg"]
+
+
+def test_embed_file_into_body_returns_multipart_fields():
+ module = MagicMock()
+ file_path = "/fake/path/secret.pem"
+ credentials = {"id": "my-id"}
+ fake_file_content = b"MY SECRET DATA"
+
+ mock = mock_open()
+ mock.return_value.read.return_value = fake_file_content
+
+ with patch("os.path.basename", return_value="secret.pem"), patch.object(
+ builtins, "open", mock
+ ):
+ body, content_type = jenkins_credential.embed_file_into_body(
+ module, file_path, credentials.copy()
+ )
+
+ assert "multipart/form-data; boundary=" in content_type
+
+ # Check if file content is embedded in body
+ assert b"MY SECRET DATA" in body
+ assert b'filename="secret.pem"' in body
+
+
+def test_embed_file_into_body_fails_when_file_unreadable():
+ module = MagicMock()
+ file_path = "/fake/path/missing.pem"
+ credentials = {"id": "something"}
+
+ with patch(open_path, side_effect=IOError("can't read file")):
+ jenkins_credential.embed_file_into_body(module, file_path, credentials)
+
+ module.fail_json.assert_called_once()
+ assert "Failed to read file" in module.fail_json.call_args[1]["msg"]
+
+
+def test_embed_file_into_body_injects_file_keys_into_credentials():
+ module = MagicMock()
+ file_path = "/fake/path/file.txt"
+ credentials = {"id": "test"}
+
+ with patch(open_path, mock_open(read_data=b"1234")), patch(
+ "os.path.basename", return_value="file.txt"
+ ):
+
+ jenkins_credential.embed_file_into_body(module, file_path, credentials)
+
+ assert credentials["file"] == "file0"
+ assert credentials["fileName"] == "file.txt"
diff --git a/tests/unit/plugins/modules/test_linode_v4.py b/tests/unit/plugins/modules/test_linode_v4.py
index d4bcdf3243..47e77a52e8 100644
--- a/tests/unit/plugins/modules/test_linode_v4.py
+++ b/tests/unit/plugins/modules/test_linode_v4.py
@@ -7,15 +7,10 @@ __metaclass__ = type
import json
import os
-import sys
import pytest
linode_apiv4 = pytest.importorskip('linode_api4')
-mandatory_py_version = pytest.mark.skipif(
- sys.version_info < (2, 7),
- reason='The linode_api4 dependency requires python2.7 or higher'
-)
from linode_api4.errors import ApiError as LinodeApiError
from linode_api4 import LinodeClient
diff --git a/tests/unit/plugins/modules/test_lxca_cmms.py b/tests/unit/plugins/modules/test_lxca_cmms.py
index 888a6c8d52..3c79148022 100644
--- a/tests/unit/plugins/modules/test_lxca_cmms.py
+++ b/tests/unit/plugins/modules/test_lxca_cmms.py
@@ -61,8 +61,8 @@ class TestMyModule():
command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
'cmms_by_chassis_uuid']),
auth_url=dict(required=True),
- uuid=dict(default=None),
- chassis=dict(default=None),
+ uuid=dict(),
+ chassis=dict(),
)
_setup_conn.return_value = "Fake connection"
_execute_module.return_value = []
diff --git a/tests/unit/plugins/modules/test_lxca_nodes.py b/tests/unit/plugins/modules/test_lxca_nodes.py
index 98c8a551d4..1f8ab84368 100644
--- a/tests/unit/plugins/modules/test_lxca_nodes.py
+++ b/tests/unit/plugins/modules/test_lxca_nodes.py
@@ -63,8 +63,8 @@ class TestMyModule():
'nodes_status_managed',
'nodes_status_unmanaged']),
auth_url=dict(required=True),
- uuid=dict(default=None),
- chassis=dict(default=None),
+ uuid=dict(),
+ chassis=dict(),
)
_setup_conn.return_value = "Fake connection"
_execute_module.return_value = []
diff --git a/tests/unit/plugins/modules/test_modprobe.py b/tests/unit/plugins/modules/test_modprobe.py
index 12f6af7273..bada481cfc 100644
--- a/tests/unit/plugins/modules/test_modprobe.py
+++ b/tests/unit/plugins/modules/test_modprobe.py
@@ -147,7 +147,7 @@ class TestUnloadModule(ModuleTestCase):
class TestModuleIsLoadedPersistently(ModuleTestCase):
def setUp(self):
- if (sys.version_info[0] == 3 and sys.version_info[1] < 7) or (sys.version_info[0] == 2 and sys.version_info[1] < 7):
+ if sys.version_info[0] == 3 and sys.version_info[1] < 7:
self.skipTest("open_mock doesn't support readline in earlier python versions")
super(TestModuleIsLoadedPersistently, self).setUp()
@@ -222,7 +222,7 @@ class TestModuleIsLoadedPersistently(ModuleTestCase):
class TestPermanentParams(ModuleTestCase):
def setUp(self):
- if (sys.version_info[0] == 3 and sys.version_info[1] < 7) or (sys.version_info[0] == 2 and sys.version_info[1] < 7):
+ if sys.version_info[0] == 3 and sys.version_info[1] < 7:
self.skipTest("open_mock doesn't support readline in earlier python versions")
super(TestPermanentParams, self).setUp()
diff --git a/tests/unit/plugins/modules/test_nmcli.py b/tests/unit/plugins/modules/test_nmcli.py
index 614c58a842..79f2f2ea1a 100644
--- a/tests/unit/plugins/modules/test_nmcli.py
+++ b/tests/unit/plugins/modules/test_nmcli.py
@@ -4350,6 +4350,8 @@ def test_bond_connection_unchanged(mocked_generic_connection_diff_check, capfd):
argument_spec=dict(
ignore_unsupported_suboptions=dict(type='bool', default=False),
autoconnect=dict(type='bool', default=True),
+ autoconnect_priority=dict(type='int'),
+ autoconnect_retries=dict(type='int'),
state=dict(type='str', required=True, choices=['absent', 'present']),
conn_name=dict(type='str', required=True),
conn_reload=dict(type='bool', required=False, default=False),
diff --git a/tests/unit/plugins/modules/test_pacemaker_cluster.py b/tests/unit/plugins/modules/test_pacemaker_cluster.py
new file mode 100644
index 0000000000..ad69411a9c
--- /dev/null
+++ b/tests/unit/plugins/modules/test_pacemaker_cluster.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+# Author: Dexter Le (dextersydney2001@gmail.com)
+# Largely adapted from test_redhat_subscription by
+# Jiri Hnidek (jhnidek@redhat.com)
+#
+# Copyright (c) Dexter Le (dextersydney2001@gmail.com)
+# Copyright (c) Jiri Hnidek (jhnidek@redhat.com)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules import pacemaker_cluster
+from .uthelper import UTHelper, RunCommandMock
+
+UTHelper.from_module(pacemaker_cluster, __name__, mocks=[RunCommandMock])
diff --git a/tests/unit/plugins/modules/test_pacemaker_cluster.yaml b/tests/unit/plugins/modules/test_pacemaker_cluster.yaml
new file mode 100644
index 0000000000..785a7cb4f6
--- /dev/null
+++ b/tests/unit/plugins/modules/test_pacemaker_cluster.yaml
@@ -0,0 +1,488 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Dexter Le (dextersydney2001@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+---
+anchors:
+ environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false}
+test_cases:
+ - id: test_online_minimal_input_initial_online_all_no_maintenance
+ input:
+ state: online
+ output:
+ changed: false
+ previous_value: ' * Online: [ pc1, pc2, pc3 ]'
+ value: ' * Online: [ pc1, pc2, pc3 ]'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: ' * Online: [ pc1, pc2, pc3 ]'
+ err: ""
+ - command: [/testbin/pcs, cluster, start, --all, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Starting Cluster..."
+ err: ""
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 1
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ err: ""
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: ' * Online: [ pc1, pc2, pc3 ]'
+ err: ""
+ - id: test_online_minimal_input_initial_offline_all_maintenance
+ input:
+ state: online
+ output:
+ changed: true
+ previous_value: 'Error: cluster is not currently running on this node'
+ value: ' * Online: [ pc1, pc2, pc3 ]'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 1
+ out: 'Error: cluster is not currently running on this node'
+ err: ""
+ - command: [/testbin/pcs, cluster, start, --all, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Starting Cluster..."
+ err: ""
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 0
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ maintenance-mode=true
+ err: ""
+ - command: [/testbin/pcs, property, set, maintenance-mode=false]
+ environ: *env-def
+ rc: 0
+ out: ""
+ err: ""
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: ' * Online: [ pc1, pc2, pc3 ]'
+ err: ""
+ - id: test_online_minimal_input_initial_offline_single_nonlocal_no_maintenance
+ input:
+ state: online
+ name: pc2
+ output:
+ changed: true
+ previous_value: '* Node pc2: UNCLEAN (offline)\n * Online: [ pc1, pc3 ]'
+ value: ' * Online: [ pc1, pc2, pc3 ]'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: '* Node pc2: UNCLEAN (offline)\n * Online: [ pc1, pc3 ]'
+ err: ""
+ - command: [/testbin/pcs, cluster, start, pc2, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Starting Cluster..."
+ err: ""
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 1
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ err: ""
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: ' * Online: [ pc1, pc2, pc3 ]'
+ err: ""
+ - id: test_online_minimal_input_initial_offline_single_local_no_maintenance
+ input:
+ state: online
+ name: pc1
+ output:
+ changed: true
+ previous_value: 'Error: cluster is not currently running on this node'
+ value: ' * Online: [ pc1, pc2, pc3 ]'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 1
+ out: 'Error: cluster is not currently running on this node'
+ err: ""
+ - command: [/testbin/pcs, cluster, start, pc1, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Starting Cluster..."
+ err: ""
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 1
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ err: ""
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: ' * Online: [ pc1, pc2, pc3 ]'
+ err: ""
+ - id: test_offline_minimal_input_initial_online_all
+ input:
+ state: offline
+ output:
+ changed: true
+ previous_value: ' * Online: [ pc1, pc2, pc3 ]'
+ value: 'Error: cluster is not currently running on this node'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: ' * Online: [ pc1, pc2, pc3 ]'
+ err: ""
+ - command: [/testbin/pcs, cluster, stop, --all, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Stopping Cluster..."
+ err: ""
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 1
+ out: 'Error: cluster is not currently running on this node'
+ err: ""
+ - id: test_offline_minimal_input_initial_offline_all
+ input:
+ state: offline
+ output:
+ changed: false
+ previous_value: 'Error: cluster is not currently running on this node'
+ value: 'Error: cluster is not currently running on this node'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 1
+ out: 'Error: cluster is not currently running on this node'
+ err: ""
+ - command: [/testbin/pcs, cluster, stop, --all, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Stopping Cluster..."
+ err: ""
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 1
+ out: 'Error: cluster is not currently running on this node'
+ err: ""
+ - id: test_offline_minimal_input_initial_offline_single_nonlocal
+ input:
+ state: offline
+ name: pc3
+ output:
+ changed: true
+ previous_value: '* Node pc2: UNCLEAN (offline)\n * Online: [ pc1, pc3 ]'
+ value: '* Node pc2: UNCLEAN (offline)\n* Node pc3: UNCLEAN (offline)\n * Online: [ pc1 ]'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: '* Node pc2: UNCLEAN (offline)\n * Online: [ pc1, pc3 ]'
+ err: ""
+ - command: [/testbin/pcs, cluster, stop, pc3, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Stopping Cluster..."
+ err: ""
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: '* Node pc2: UNCLEAN (offline)\n* Node pc3: UNCLEAN (offline)\n * Online: [ pc1 ]'
+ err: ""
+ - id: test_restart_minimal_input_initial_online_all_no_maintenance
+ input:
+ state: restart
+ output:
+ changed: false
+ previous_value: ' * Online: [ pc1, pc2, pc3 ]'
+ value: ' * Online: [ pc1, pc2, pc3 ]'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: ' * Online: [ pc1, pc2, pc3 ]'
+ err: ""
+ - command: [/testbin/pcs, cluster, stop, --all, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Stopping Cluster..."
+ err: ""
+ - command: [/testbin/pcs, cluster, start, --all, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Starting Cluster..."
+ err: ""
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 1
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ err: ""
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: ' * Online: [ pc1, pc2, pc3 ]'
+ err: ""
+ - id: test_restart_minimal_input_initial_offline_all_no_maintenance
+ input:
+ state: restart
+ output:
+ changed: true
+ previous_value: 'Error: cluster is not currently running on this node'
+ value: ' * Online: [ pc1, pc2, pc3 ]'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 1
+ out: 'Error: cluster is not currently running on this node'
+ err: ""
+ - command: [/testbin/pcs, cluster, stop, --all, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Stopping Cluster..."
+ err: ""
+ - command: [/testbin/pcs, cluster, start, --all, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Starting Cluster..."
+ err: ""
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 1
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ err: ""
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: ' * Online: [ pc1, pc2, pc3 ]'
+ err: ""
+ - id: test_restart_minimal_input_initial_offline_all_maintenance
+ input:
+ state: restart
+ output:
+ changed: true
+ previous_value: 'Error: cluster is not currently running on this node'
+ value: ' * Online: [ pc1, pc2, pc3 ]'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 1
+ out: 'Error: cluster is not currently running on this node'
+ err: ""
+ - command: [/testbin/pcs, cluster, stop, --all, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Stopping Cluster..."
+ err: ""
+ - command: [/testbin/pcs, cluster, start, --all, --wait=300]
+ environ: *env-def
+ rc: 0
+ out: "Starting Cluster..."
+ err: ""
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 0
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ maintenance-mode=true
+ err: ""
+ - command: [/testbin/pcs, property, set, maintenance-mode=false]
+ environ: *env-def
+ rc: 0
+ out: ""
+ err: ""
+ - command: [/testbin/pcs, cluster, status]
+ environ: *env-def
+ rc: 0
+ out: ' * Online: [ pc1, pc2, pc3 ]'
+ err: ""
+ - id: test_maintenance_minimal_input_initial_online
+ input:
+ state: maintenance
+ output:
+ changed: true
+ previous_value: 'maintenance-mode=false (default)'
+ value: 'maintenance-mode=true'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, property, config, maintenance-mode]
+ environ: *env-def
+ rc: 0
+ out: 'maintenance-mode=false (default)'
+ err: ""
+ - command: [/testbin/pcs, property, set, maintenance-mode=true]
+ environ: *env-def
+ rc: 0
+ out: ""
+ err: ""
+ - command: [/testbin/pcs, property, config, maintenance-mode]
+ environ: *env-def
+ rc: 0
+ out: 'maintenance-mode=true'
+ err: ""
+ - id: test_maintenance_minimal_input_initial_offline
+ input:
+ state: maintenance
+ output:
+ failed: true
+ msg: "pcs failed with error (rc=1): Error: unable to get cib"
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, property, config, maintenance-mode]
+ environ: *env-def
+ rc: 1
+ out: ""
+ err: "Error: unable to get cib"
+ - command: [/testbin/pcs, property, set, maintenance-mode=true]
+ environ: *env-def
+ rc: 1
+ out: ""
+ err: "Error: unable to get cib"
+ - id: test_maintenance_minimal_input_initial_maintenance
+ input:
+ state: maintenance
+ output:
+ changed: false
+ previous_value: 'maintenance-mode=true'
+ value: 'maintenance-mode=true'
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, property, config, maintenance-mode]
+ environ: *env-def
+ rc: 0
+ out: 'maintenance-mode=true'
+ err: ""
+ - command: [/testbin/pcs, property, set, maintenance-mode=true]
+ environ: *env-def
+ rc: 0
+ out: ""
+ err: ""
+ - command: [/testbin/pcs, property, config, maintenance-mode]
+ environ: *env-def
+ rc: 0
+ out: 'maintenance-mode=true'
+ err: ""
+ - id: test_cleanup_minimal_input_initial_resources_not_exist
+ input:
+ state: cleanup
+ output:
+ changed: false
+ previous_value: "NO resources configured"
+ value: "NO resources configured"
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, resource, status]
+ environ: *env-def
+ rc: 0
+ out: "NO resources configured"
+ err: ""
+ - command: [/testbin/pcs, resource, cleanup]
+ environ: *env-def
+ rc: 0
+ out: "Cleaned up all resources on all nodes"
+ err: ""
+ - command: [/testbin/pcs, resource, status]
+ environ: *env-def
+ rc: 0
+ out: "NO resources configured"
+ err: ""
+ - id: test_cleanup_minimal_input_initial_resources_exists
+ input:
+ state: cleanup
+ output:
+ changed: true
+ previous_value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
+ value: "NO resources configured"
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, resource, status]
+ environ: *env-def
+ rc: 0
+ out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
+ err: ""
+ - command: [/testbin/pcs, resource, cleanup]
+ environ: *env-def
+ rc: 0
+ out: "Cleaned up all resources on all nodes"
+ err: ""
+ - command: [/testbin/pcs, resource, status]
+ environ: *env-def
+ rc: 0
+ out: "NO resources configured"
+ err: ""
+ - id: test_cleanup_specific_minimal_input_initial_resources_exists
+ input:
+ state: cleanup
+ name: virtual-ip
+ output:
+ changed: true
+ previous_value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
+ value: "NO resources configured"
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, resource, status, virtual-ip]
+ environ: *env-def
+ rc: 0
+ out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
+ err: ""
+ - command: [/testbin/pcs, resource, cleanup, virtual-ip]
+ environ: *env-def
+ rc: 0
+ out: "Cleaned up virtual-ip on X"
+ err: ""
+ - command: [/testbin/pcs, resource, status, virtual-ip]
+ environ: *env-def
+ rc: 0
+ out: "NO resources configured"
+ err: ""
diff --git a/tests/unit/plugins/modules/test_pacemaker_resource.yaml b/tests/unit/plugins/modules/test_pacemaker_resource.yaml
index 3739780424..76679d14d9 100644
--- a/tests/unit/plugins/modules/test_pacemaker_resource.yaml
+++ b/tests/unit/plugins/modules/test_pacemaker_resource.yaml
@@ -30,6 +30,16 @@ test_cases:
environ: *env-def
rc: 1
out: ""
+ err: "Error: resource or tag id 'virtual-ip' not found"
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 1
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
err: ""
- command: [/testbin/pcs, resource, create, virtual-ip, IPaddr2, "ip=[192.168.2.1]", --wait=300]
environ: *env-def
@@ -41,6 +51,63 @@ test_cases:
rc: 0
out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
err: ""
+ - id: test_present_filled_input_resource_not_exist
+ input:
+ state: present
+ name: virtual-ip
+ resource_type:
+ resource_name: IPaddr2
+ resource_option:
+ - "ip=[192.168.2.1]"
+ resource_operation:
+ - operation_action: start
+ operation_option:
+ - timeout=1200
+ - operation_action: stop
+ operation_option:
+ - timeout=1200
+ - operation_action: monitor
+ operation_option:
+ - timeout=1200
+ resource_meta:
+ - test_meta1=123
+ - test_meta2=456
+ resource_argument:
+ argument_action: group
+ argument_option:
+ - test_group
+ wait: 200
+ output:
+ changed: true
+ previous_value: null
+ value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, resource, status, virtual-ip]
+ environ: *env-def
+ rc: 1
+ out: ""
+ err: "Error: resource or tag id 'virtual-ip' not found"
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 1
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ err: ""
+ - command: [/testbin/pcs, resource, create, virtual-ip, IPaddr2, "ip=[192.168.2.1]", op, start, timeout=1200, op, stop, timeout=1200, op, monitor, timeout=1200, meta, test_meta1=123, meta, test_meta2=456, --group, test_group, --wait=200]
+ environ: *env-def
+ rc: 0
+ out: "Assumed agent name 'ocf:heartbeat:IPaddr2' (deduced from 'IPAddr2')"
+ err: ""
+ - command: [/testbin/pcs, resource, status, virtual-ip]
+ environ: *env-def
+ rc: 0
+ out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
+ err: ""
- id: test_present_minimal_input_resource_exists
input:
state: present
@@ -60,6 +127,16 @@ test_cases:
rc: 0
out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
err: ""
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 1
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ err: ""
- command: [/testbin/pcs, resource, create, virtual-ip, IPaddr2, "ip=[192.168.2.1]", --wait=300]
environ: *env-def
rc: 1
@@ -70,6 +147,46 @@ test_cases:
rc: 0
out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
err: ""
+ - id: test_present_minimal_input_resource_maintenance_mode
+ input:
+ state: present
+ name: virtual-ip
+ resource_type:
+ resource_name: IPaddr2
+ resource_option:
+ - "ip=[192.168.2.1]"
+ output:
+ changed: true
+ previous_value: null
+ value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Stopped"
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, resource, status, virtual-ip]
+ environ: *env-def
+ rc: 1
+ out: ""
+ err: ""
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 0
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ maintenance-mode=true
+ err: ""
+ - command: [/testbin/pcs, resource, create, virtual-ip, IPaddr2, "ip=[192.168.2.1]", --wait=300]
+ environ: *env-def
+ rc: 1
+ out: ""
+ err: "Error: resource 'virtual-ip' is not running on any node"
+ - command: [/testbin/pcs, resource, status, virtual-ip]
+ environ: *env-def
+ rc: 0
+ out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Stopped"
+ err: ""
- id: test_absent_minimal_input_resource_not_exist
input:
state: absent
@@ -84,6 +201,16 @@ test_cases:
environ: *env-def
rc: 1
out: ""
+ err: "Error: resource or tag id 'virtual-ip' not found"
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 1
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
err: ""
- command: [/testbin/pcs, resource, remove, virtual-ip]
environ: *env-def
@@ -94,7 +221,7 @@ test_cases:
environ: *env-def
rc: 1
out: ""
- err: ""
+ err: "Error: resource or tag id 'virtual-ip' not found"
- id: test_absent_minimal_input_resource_exists
input:
state: absent
@@ -110,6 +237,16 @@ test_cases:
rc: 0
out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
err: ""
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 1
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ err: ""
- command: [/testbin/pcs, resource, remove, virtual-ip]
environ: *env-def
rc: 0
@@ -119,7 +256,43 @@ test_cases:
environ: *env-def
rc: 1
out: ""
+ err: "Error: resource or tag id 'virtual-ip' not found"
+ - id: test_absent_minimal_input_maintenance_mode
+ input:
+ state: absent
+ name: virtual-ip
+ output:
+ changed: true
+ previous_value: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
+ value: null
+ mocks:
+ run_command:
+ - command: [/testbin/pcs, resource, status, virtual-ip]
+ environ: *env-def
+ rc: 0
+ out: " * virtual-ip\t(ocf:heartbeat:IPAddr2):\t Started"
err: ""
+ - command: [/testbin/pcs, property, config]
+ environ: *env-def
+ rc: 0
+ out: |
+ Cluster Properties: cib-bootstrap-options
+ cluster-infrastructure=corosync
+ cluster-name=hacluster
+ dc-version=2.1.9-1.fc41-7188dbf
+ have-watchdog=false
+ maintenance-mode=true
+ err: ""
+ - command: [/testbin/pcs, resource, remove, virtual-ip, --force]
+ environ: *env-def
+ rc: 0
+ out: ""
+ err: "Deleting Resource (and group) - virtual-ip"
+ - command: [/testbin/pcs, resource, status, virtual-ip]
+ environ: *env-def
+ rc: 1
+ out: ""
+ err: "Error: resource or tag id 'virtual-ip' not found"
- id: test_enabled_minimal_input_resource_not_exists
input:
state: enabled
@@ -133,7 +306,7 @@ test_cases:
environ: *env-def
rc: 1
out: ""
- err: ""
+ err: "Error: resource or tag id 'virtual-ip' not found"
- command: [/testbin/pcs, resource, enable, virtual-ip]
environ: *env-def
rc: 1
@@ -177,7 +350,7 @@ test_cases:
environ: *env-def
rc: 1
out: ""
- err: ""
+ err: "Error: resource or tag id 'virtual-ip' not found"
- command: [/testbin/pcs, resource, disable, virtual-ip]
environ: *env-def
rc: 1
diff --git a/tests/unit/plugins/modules/test_pacman_key.py b/tests/unit/plugins/modules/test_pacman_key.py
index ac85708985..d372885ce2 100644
--- a/tests/unit/plugins/modules/test_pacman_key.py
+++ b/tests/unit/plugins/modules/test_pacman_key.py
@@ -17,8 +17,9 @@ MOCK_BIN_PATH = '/mocked/path'
TESTING_KEYID = '14F26682D0916CDD81E37B6D61B7B526D98F0353'
TESTING_KEYFILE_PATH = '/tmp/pubkey.asc'
-# gpg --{show,list}-key output (key present)
-GPG_SHOWKEY_OUTPUT = '''tru::1:1616373715:0:3:1:5
+# gpg --{show,list}-key output (key present, but expired)
+GPG_SHOWKEY_OUTPUT_EXPIRED = """
+tru::1:1616373715:0:3:1:5
pub:-:4096:1:61B7B526D98F0353:1437155332:::-:::scSC::::::23::0:
fpr:::::::::14F26682D0916CDD81E37B6D61B7B526D98F0353:
uid:-::::1437155332::E57D1F9BFF3B404F9F30333629369B08DF5E2161::Mozilla Software Releases ::::::::::0:
@@ -27,24 +28,76 @@ fpr:::::::::F2EF4E6E6AE75B95F11F1EB51C69C4E55E9905DB:
sub:e:4096:1:BBBEBDBB24C6F355:1498143157:1561215157:::::s::::::23:
fpr:::::::::DCEAC5D96135B91C4EA672ABBBBEBDBB24C6F355:
sub:e:4096:1:F1A6668FBB7D572E:1559247338:1622319338:::::s::::::23:
-fpr:::::::::097B313077AE62A02F84DA4DF1A6668FBB7D572E:'''
+fpr:::::::::097B313077AE62A02F84DA4DF1A6668FBB7D572E:
+""".strip()
+
+# gpg --{show,list}-key output (key present and trusted)
+GPG_SHOWKEY_OUTPUT_TRUSTED = """
+tru::1:1616373715:0:3:1:5
+pub:f:4096:1:61B7B526D98F0353:1437155332:::-:::scSC::::::23::0:
+fpr:::::::::14F26682D0916CDD81E37B6D61B7B526D98F0353:
+uid:f::::1437155332::E57D1F9BFF3B404F9F30333629369B08DF5E2161::Mozilla Software Releases ::::::::::0:
+sub:e:4096:1:1C69C4E55E9905DB:1437155572:1500227572:::::s::::::23:
+fpr:::::::::F2EF4E6E6AE75B95F11F1EB51C69C4E55E9905DB:
+sub:e:4096:1:BBBEBDBB24C6F355:1498143157:1561215157:::::s::::::23:
+fpr:::::::::DCEAC5D96135B91C4EA672ABBBBEBDBB24C6F355:
+sub:e:4096:1:F1A6668FBB7D572E:1559247338:1622319338:::::s::::::23:
+fpr:::::::::097B313077AE62A02F84DA4DF1A6668FBB7D572E:
+""".strip()
+
+GPG_LIST_SECRET_KEY_OUTPUT = """
+sec:u:2048:1:58FCCBCC131FCCAB:1406639814:::u:::scSC:::+:::23::0:
+fpr:::::::::AC0F357BE07F1493C34DCAB258FCCBCC131FCCAB:
+grp:::::::::C1227FFDD039AD942F777EA0639E1F1EAA96AB12:
+uid:u::::1406639814::79311EDEA01302E0DBBB2F33AE799F8BB677652F::Pacman Keyring Master Key ::::::::::0:
+""".lstrip()
+
+GPG_CHECK_SIGNATURES_OUTPUT = """
+tru::1:1742507906:1750096255:3:1:5
+pub:f:4096:1:61B7B526D98F0353:1437155332:::-:::scSC::::::23:1742507897:1 https\x3a//185.125.188.26\x3a443:
+fpr:::::::::14F26682D0916CDD81E37B6D61B7B526D98F0353:
+uid:f::::1437155332::E57D1F9BFF3B404F9F30333629369B08DF5E2161::Mozilla Software Releases :::::::::1742507897:1:
+sig:!::1:61B7B526D98F0353:1437155332::::Mozilla Software Releases :13x:::::2:
+sig:!::1:58FCCBCC131FCCAB:1742507905::::Pacman Keyring Master Key :10l::AC0F357BE07F1493C34DCAB258FCCBCC131FCCAB:::8:
+sub:f:4096:1:E36D3B13F3D93274:1683308659:1746380659:::::s::::::23:
+fpr:::::::::ADD7079479700DCADFDD5337E36D3B13F3D93274:
+sig:!::1:61B7B526D98F0353:1683308659::::Mozilla Software Releases :18x::14F26682D0916CDD81E37B6D61B7B526D98F0353:::10:
+sub:e:4096:1:1C69C4E55E9905DB:1437155572:1500227572:::::s::::::23:
+fpr:::::::::F2EF4E6E6AE75B95F11F1EB51C69C4E55E9905DB:
+sig:!::1:61B7B526D98F0353:1437155572::::Mozilla Software Releases :18x:::::2:
+sub:e:4096:1:BBBEBDBB24C6F355:1498143157:1561215157:::::s::::::23:
+fpr:::::::::DCEAC5D96135B91C4EA672ABBBBEBDBB24C6F355:
+sig:!::1:61B7B526D98F0353:1498143157::::Mozilla Software Releases :18x::14F26682D0916CDD81E37B6D61B7B526D98F0353:::8:
+sub:e:4096:1:F1A6668FBB7D572E:1559247338:1622319338:::::s::::::23:
+fpr:::::::::097B313077AE62A02F84DA4DF1A6668FBB7D572E:
+sig:!::1:61B7B526D98F0353:1559247338::::Mozilla Software Releases :18x::14F26682D0916CDD81E37B6D61B7B526D98F0353:::10:
+sub:e:4096:1:EBE41E90F6F12F6D:1621282261:1684354261:::::s::::::23:
+fpr:::::::::4360FE2109C49763186F8E21EBE41E90F6F12F6D:
+sig:!::1:61B7B526D98F0353:1621282261::::Mozilla Software Releases :18x::14F26682D0916CDD81E37B6D61B7B526D98F0353:::10:
+""".strip()
# gpg --{show,list}-key output (key absent)
-GPG_NOKEY_OUTPUT = '''gpg: error reading key: No public key
-tru::1:1616373715:0:3:1:5'''
+GPG_NOKEY_OUTPUT = """
+gpg: error reading key: No public key
+tru::1:1616373715:0:3:1:5
+""".strip()
# pacman-key output (successful invocation)
-PACMAN_KEY_SUCCESS = '''==> Updating trust database...
-gpg: next trustdb check due at 2021-08-02'''
+PACMAN_KEY_SUCCESS = """
+==> Updating trust database...
+gpg: next trustdb check due at 2021-08-02
+""".strip()
# expected command for gpg --list-keys KEYID
RUN_CMD_LISTKEYS = [
MOCK_BIN_PATH,
+ '--homedir=/etc/pacman.d/gnupg',
+ '--no-permission-warning',
'--with-colons',
+ '--quiet',
'--batch',
'--no-tty',
'--no-default-keyring',
- '--keyring=/etc/pacman.d/gnupg/pubring.gpg',
'--list-keys',
TESTING_KEYID,
]
@@ -52,10 +105,12 @@ RUN_CMD_LISTKEYS = [
# expected command for gpg --show-keys KEYFILE
RUN_CMD_SHOW_KEYFILE = [
MOCK_BIN_PATH,
+ '--no-permission-warning',
'--with-colons',
- '--with-fingerprint',
+ '--quiet',
'--batch',
'--no-tty',
+ '--with-fingerprint',
'--show-keys',
TESTING_KEYFILE_PATH,
]
@@ -69,6 +124,29 @@ RUN_CMD_LSIGN_KEY = [
TESTING_KEYID,
]
+RUN_CMD_LIST_SECRET_KEY = [
+ MOCK_BIN_PATH,
+ '--homedir=/etc/pacman.d/gnupg',
+ '--no-permission-warning',
+ '--with-colons',
+ '--quiet',
+ '--batch',
+ '--no-tty',
+ '--list-secret-key',
+]
+
+# expected command for gpg --check-signatures
+RUN_CMD_CHECK_SIGNATURES = [
+ MOCK_BIN_PATH,
+ '--homedir=/etc/pacman.d/gnupg',
+ '--no-permission-warning',
+ '--with-colons',
+ '--quiet',
+ '--batch',
+ '--no-tty',
+ '--check-signatures',
+ TESTING_KEYID,
+]
TESTCASES = [
#
@@ -152,7 +230,7 @@ TESTCASES = [
{'check_rc': False},
(
0,
- GPG_SHOWKEY_OUTPUT,
+ GPG_SHOWKEY_OUTPUT_EXPIRED,
'',
),
),
@@ -222,7 +300,7 @@ TESTCASES = [
{'check_rc': False},
(
0,
- GPG_SHOWKEY_OUTPUT,
+ GPG_SHOWKEY_OUTPUT_EXPIRED,
'',
),
),
@@ -248,7 +326,79 @@ TESTCASES = [
{'check_rc': False},
(
0,
- GPG_SHOWKEY_OUTPUT,
+ GPG_SHOWKEY_OUTPUT_EXPIRED,
+ '',
+ ),
+ ),
+ ],
+ 'changed': False,
+ },
+ ],
+ # state present, ensure_trusted & key expired
+ [
+ {
+ 'state': 'present',
+ 'ensure_trusted': True,
+ 'id': TESTING_KEYID,
+ 'data': 'FAKEDATA',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'id': 'state_present_trusted_key_expired',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {
+ 'check_rc': False,
+ },
+ (
+ 0,
+ GPG_SHOWKEY_OUTPUT_EXPIRED,
+ '',
+ ),
+ ),
+ ],
+ 'changed': True,
+ },
+ ],
+ # state present & key trusted
+ [
+ {
+ 'state': 'present',
+ 'ensure_trusted': True,
+ 'id': TESTING_KEYID,
+ 'data': 'FAKEDATA',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'id': 'state_present_and_key_trusted',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {
+ 'check_rc': False,
+ },
+ (
+ 0,
+ GPG_SHOWKEY_OUTPUT_TRUSTED,
+ '',
+ ),
+ ),
+ (
+ RUN_CMD_CHECK_SIGNATURES,
+ {},
+ (
+ 0,
+ GPG_CHECK_SIGNATURES_OUTPUT,
+ '',
+ ),
+ ),
+ (
+ RUN_CMD_LIST_SECRET_KEY,
+ {},
+ (
+ 0,
+ GPG_LIST_SECRET_KEY_OUTPUT,
'',
),
),
@@ -270,7 +420,7 @@ TESTCASES = [
{'check_rc': False},
(
0,
- GPG_SHOWKEY_OUTPUT,
+ GPG_SHOWKEY_OUTPUT_EXPIRED,
'',
),
),
@@ -339,7 +489,7 @@ TESTCASES = [
{'check_rc': True},
(
0,
- GPG_SHOWKEY_OUTPUT,
+ GPG_SHOWKEY_OUTPUT_EXPIRED,
'',
),
),
@@ -397,7 +547,7 @@ TESTCASES = [
{'check_rc': True},
(
0,
- GPG_SHOWKEY_OUTPUT.replace('61B7B526D98F0353', '61B7B526D98F0354'),
+ GPG_SHOWKEY_OUTPUT_EXPIRED.replace('61B7B526D98F0353', '61B7B526D98F0354'),
'',
),
),
@@ -485,7 +635,7 @@ gpg: imported: 1
{'check_rc': True},
(
0,
- GPG_SHOWKEY_OUTPUT,
+ GPG_SHOWKEY_OUTPUT_EXPIRED,
'',
),
),
diff --git a/tests/unit/plugins/modules/test_proxmox_backup.py b/tests/unit/plugins/modules/test_proxmox_backup.py
deleted file mode 100644
index 372347d279..0000000000
--- a/tests/unit/plugins/modules/test_proxmox_backup.py
+++ /dev/null
@@ -1,374 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2019, Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-import \
- ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
-from ansible_collections.community.general.plugins.modules import proxmox_backup
-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
- AnsibleExitJson, AnsibleFailJson, set_module_args, ModuleTestCase)
-from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
-
-__metaclass__ = type
-
-import pytest
-
-proxmoxer = pytest.importorskip('proxmoxer')
-
-
-MINIMAL_PERMISSIONS = {
- '/sdn/zones': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1},
- '/nodes': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1},
- '/sdn': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1},
- '/vms': {'VM.Audit': 1,
- 'Sys.Audit': 1,
- 'Mapping.Audit': 1,
- 'VM.Backup': 1,
- 'Datastore.Audit': 1,
- 'SDN.Audit': 1,
- 'Pool.Audit': 1},
- '/': {'Datastore.Audit': 1, 'Datastore.AllocateSpace': 1},
- '/storage/local-zfs': {'Datastore.AllocateSpace': 1,
- 'Datastore.Audit': 1},
- '/storage': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1},
- '/access': {'Datastore.AllocateSpace': 1, 'Datastore.Audit': 1},
- '/vms/101': {'VM.Backup': 1,
- 'Mapping.Audit': 1,
- 'Datastore.AllocateSpace': 0,
- 'Sys.Audit': 1,
- 'VM.Audit': 1,
- 'SDN.Audit': 1,
- 'Pool.Audit': 1,
- 'Datastore.Audit': 1},
- '/vms/100': {'VM.Backup': 1,
- 'Mapping.Audit': 1,
- 'Datastore.AllocateSpace': 0,
- 'Sys.Audit': 1,
- 'VM.Audit': 1,
- 'SDN.Audit': 1,
- 'Pool.Audit': 1,
- 'Datastore.Audit': 1},
- '/pool': {'Datastore.Audit': 1, 'Datastore.AllocateSpace': 1}, }
-
-STORAGE = [{'type': 'pbs',
- 'username': 'test@pbs',
- 'datastore': 'Backup-Pool',
- 'server': '10.0.0.1',
- 'shared': 1,
- 'fingerprint': '94:fd:ac:e7:d5:36:0e:11:5b:23:05:40:d2:a4:e1:8a:c1:52:41:01:07:28:c0:4d:c5:ee:df:7f:7c:03:ab:41',
- 'prune-backups': 'keep-all=1',
- 'storage': 'backup',
- 'content': 'backup',
- 'digest': 'ca46a68d7699de061c139d714892682ea7c9d681'},
- {'nodes': 'node1,node2,node3',
- 'sparse': 1,
- 'type': 'zfspool',
- 'content': 'rootdir,images',
- 'digest': 'ca46a68d7699de061c139d714892682ea7c9d681',
- 'pool': 'rpool/data',
- 'storage': 'local-zfs'}]
-
-
-VMS = [{"diskwrite": 0,
- "vmid": 100,
- "node": "node1",
- "id": "lxc/100",
- "maxdisk": 10000,
- "template": 0,
- "disk": 10000,
- "uptime": 10000,
- "maxmem": 10000,
- "maxcpu": 1,
- "netin": 10000,
- "type": "lxc",
- "netout": 10000,
- "mem": 10000,
- "diskread": 10000,
- "cpu": 0.01,
- "name": "test-lxc",
- "status": "running"},
- {"diskwrite": 0,
- "vmid": 101,
- "node": "node2",
- "id": "kvm/101",
- "maxdisk": 10000,
- "template": 0,
- "disk": 10000,
- "uptime": 10000,
- "maxmem": 10000,
- "maxcpu": 1,
- "netin": 10000,
- "type": "lxc",
- "netout": 10000,
- "mem": 10000,
- "diskread": 10000,
- "cpu": 0.01,
- "name": "test-kvm",
- "status": "running"}
- ]
-
-NODES = [{'level': '',
- 'type': 'node',
- 'node': 'node1',
- 'status': 'online',
- 'id': 'node/node1',
- 'cgroup-mode': 2},
- {'status': 'online',
- 'id': 'node/node2',
- 'cgroup-mode': 2,
- 'level': '',
- 'node': 'node2',
- 'type': 'node'},
- {'status': 'online',
- 'id': 'node/node3',
- 'cgroup-mode': 2,
- 'level': '',
- 'node': 'node3',
- 'type': 'node'},
- ]
-
-TASK_API_RETURN = {
- "node1": {
- 'starttime': 1732606253,
- 'status': 'stopped',
- 'type': 'vzdump',
- 'pstart': 517463911,
- 'upid': 'UPID:node1:003F8C63:1E7FB79C:67449780:vzdump:100:root@pam:',
- 'id': '100',
- 'node': 'hypervisor',
- 'pid': 541669,
- 'user': 'test@pve',
- 'exitstatus': 'OK'},
- "node2": {
- 'starttime': 1732606253,
- 'status': 'stopped',
- 'type': 'vzdump',
- 'pstart': 517463911,
- 'upid': 'UPID:node2:000029DD:1599528B:6108F068:vzdump:101:root@pam:',
- 'id': '101',
- 'node': 'hypervisor',
- 'pid': 541669,
- 'user': 'test@pve',
- 'exitstatus': 'OK'},
-}
-
-
-VZDUMP_API_RETURN = {
- "node1": "UPID:node1:003F8C63:1E7FB79C:67449780:vzdump:100:root@pam:",
- "node2": "UPID:node2:000029DD:1599528B:6108F068:vzdump:101:root@pam:",
- "node3": "OK",
-}
-
-
-TASKLOG_API_RETURN = {"node1": [{'n': 1,
- 't': "INFO: starting new backup job: vzdump 100 --mode snapshot --node node1 "
- "--notes-template '{{guestname}}' --storage backup --notification-mode auto"},
- {'t': 'INFO: Starting Backup of VM 100 (lxc)',
- 'n': 2},
- {'n': 23, 't': 'INFO: adding notes to backup'},
- {'n': 24,
- 't': 'INFO: Finished Backup of VM 100 (00:00:03)'},
- {'n': 25,
- 't': 'INFO: Backup finished at 2024-11-25 16:28:03'},
- {'t': 'INFO: Backup job finished successfully',
- 'n': 26},
- {'n': 27, 't': 'TASK OK'}],
- "node2": [{'n': 1,
- 't': "INFO: starting new backup job: vzdump 101 --mode snapshot --node node2 "
- "--notes-template '{{guestname}}' --storage backup --notification-mode auto"},
- {'t': 'INFO: Starting Backup of VM 101 (kvm)',
- 'n': 2},
- {'n': 24,
- 't': 'INFO: Finished Backup of VM 100 (00:00:03)'},
- {'n': 25,
- 't': 'INFO: Backup finished at 2024-11-25 16:28:03'},
- {'t': 'INFO: Backup job finished successfully',
- 'n': 26},
- {'n': 27, 't': 'TASK OK'}],
- }
-
-
-def return_valid_resources(resource_type, *args, **kwargs):
- if resource_type == "vm":
- return VMS
- if resource_type == "node":
- return NODES
-
-
-def return_vzdump_api(node, *args, **kwargs):
- if node in ("node1", "node2", "node3"):
- return VZDUMP_API_RETURN[node]
-
-
-def return_logs_api(node, *args, **kwargs):
- if node in ("node1", "node2"):
- return TASKLOG_API_RETURN[node]
-
-
-def return_task_status_api(node, *args, **kwargs):
- if node in ("node1", "node2"):
- return TASK_API_RETURN[node]
-
-
-class TestProxmoxBackup(ModuleTestCase):
- def setUp(self):
- super(TestProxmoxBackup, self).setUp()
- proxmox_utils.HAS_PROXMOXER = True
- self.module = proxmox_backup
- self.connect_mock = patch(
- "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect",
- ).start()
- self.mock_get_permissions = patch.object(
- proxmox_backup.ProxmoxBackupAnsible, "_get_permissions").start()
- self.mock_get_storages = patch.object(proxmox_utils.ProxmoxAnsible,
- "get_storages").start()
- self.mock_get_resources = patch.object(
- proxmox_backup.ProxmoxBackupAnsible, "_get_resources").start()
- self.mock_get_tasklog = patch.object(
- proxmox_backup.ProxmoxBackupAnsible, "_get_tasklog").start()
- self.mock_post_vzdump = patch.object(
- proxmox_backup.ProxmoxBackupAnsible, "_post_vzdump").start()
- self.mock_get_taskok = patch.object(
- proxmox_backup.ProxmoxBackupAnsible, "_get_taskok").start()
- self.mock_get_permissions.return_value = MINIMAL_PERMISSIONS
- self.mock_get_storages.return_value = STORAGE
- self.mock_get_resources.side_effect = return_valid_resources
- self.mock_get_taskok.side_effect = return_task_status_api
- self.mock_get_tasklog.side_effect = return_logs_api
- self.mock_post_vzdump.side_effect = return_vzdump_api
-
- def tearDown(self):
- self.connect_mock.stop()
- self.mock_get_permissions.stop()
- self.mock_get_storages.stop()
- self.mock_get_resources.stop()
- super(TestProxmoxBackup, self).tearDown()
-
- def test_proxmox_backup_without_argument(self):
- with set_module_args({}):
- with pytest.raises(AnsibleFailJson):
- proxmox_backup.main()
-
- def test_create_backup_check_mode(self):
- with set_module_args(
- {
- "api_user": "root@pam",
- "api_password": "secret",
- "api_host": "127.0.0.1",
- "mode": "all",
- "storage": "backup",
- "_ansible_check_mode": True,
- }
- ):
- with pytest.raises(AnsibleExitJson) as exc_info:
- proxmox_backup.main()
-
- result = exc_info.value.args[0]
-
- assert result["changed"] is True
- assert result["msg"] == "Backups would be created"
- assert len(result["backups"]) == 0
- assert self.mock_get_taskok.call_count == 0
- assert self.mock_get_tasklog.call_count == 0
- assert self.mock_post_vzdump.call_count == 0
-
- def test_create_backup_all_mode(self):
- with set_module_args({
- "api_user": "root@pam",
- "api_password": "secret",
- "api_host": "127.0.0.1",
- "mode": "all",
- "storage": "backup",
- }):
- with pytest.raises(AnsibleExitJson) as exc_info:
- proxmox_backup.main()
-
- result = exc_info.value.args[0]
- assert result["changed"] is True
- assert result["msg"] == "Backup tasks created"
- for backup_result in result["backups"]:
- assert backup_result["upid"] in {
- VZDUMP_API_RETURN[key] for key in VZDUMP_API_RETURN}
- assert self.mock_get_taskok.call_count == 0
- assert self.mock_post_vzdump.call_count == 3
-
- def test_create_backup_include_mode_with_wait(self):
- with set_module_args({
- "api_user": "root@pam",
- "api_password": "secret",
- "api_host": "127.0.0.1",
- "mode": "include",
- "node": "node1",
- "storage": "backup",
- "vmids": [100],
- "wait": True
- }):
- with pytest.raises(AnsibleExitJson) as exc_info:
- proxmox_backup.main()
-
- result = exc_info.value.args[0]
- assert result["changed"] is True
- assert result["msg"] == "Backups succeeded"
- for backup_result in result["backups"]:
- assert backup_result["upid"] in {
- VZDUMP_API_RETURN[key] for key in VZDUMP_API_RETURN}
- assert self.mock_get_taskok.call_count == 1
- assert self.mock_post_vzdump.call_count == 1
-
- def test_fail_insufficient_permissions(self):
- with set_module_args({
- "api_user": "root@pam",
- "api_password": "secret",
- "api_host": "127.0.0.1",
- "mode": "include",
- "storage": "backup",
- "performance_tweaks": "max-workers=2",
- "vmids": [100],
- "wait": True
- }):
- with pytest.raises(AnsibleFailJson) as exc_info:
- proxmox_backup.main()
-
- result = exc_info.value.args[0]
- assert result["msg"] == "Insufficient permission: Performance_tweaks and bandwidth require 'Sys.Modify' permission for '/'"
- assert self.mock_get_taskok.call_count == 0
- assert self.mock_post_vzdump.call_count == 0
-
- def test_fail_missing_node(self):
- with set_module_args({
- "api_user": "root@pam",
- "api_password": "secret",
- "api_host": "127.0.0.1",
- "mode": "include",
- "storage": "backup",
- "node": "nonexistingnode",
- "vmids": [100],
- "wait": True
- }):
- with pytest.raises(AnsibleFailJson) as exc_info:
- proxmox_backup.main()
-
- result = exc_info.value.args[0]
- assert result["msg"] == "Node nonexistingnode was specified, but does not exist on the cluster"
- assert self.mock_get_taskok.call_count == 0
- assert self.mock_post_vzdump.call_count == 0
-
- def test_fail_missing_storage(self):
- with set_module_args({
- "api_user": "root@pam",
- "api_password": "secret",
- "api_host": "127.0.0.1",
- "mode": "include",
- "storage": "nonexistingstorage",
- "vmids": [100],
- "wait": True
- }):
- with pytest.raises(AnsibleFailJson) as exc_info:
- proxmox_backup.main()
-
- result = exc_info.value.args[0]
- assert result["msg"] == "Storage nonexistingstorage does not exist in the cluster"
- assert self.mock_get_taskok.call_count == 0
- assert self.mock_post_vzdump.call_count == 0
diff --git a/tests/unit/plugins/modules/test_proxmox_backup_info.py b/tests/unit/plugins/modules/test_proxmox_backup_info.py
deleted file mode 100644
index da63ce3fe7..0000000000
--- a/tests/unit/plugins/modules/test_proxmox_backup_info.py
+++ /dev/null
@@ -1,275 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2024 Marzieh Raoufnezhad
-# Copyright (c) 2024 Maryam Mayabi
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-import pytest
-
-proxmoxer = pytest.importorskip("proxmoxer")
-
-from ansible_collections.community.general.plugins.modules import proxmox_backup_info
-from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
- AnsibleExitJson,
- AnsibleFailJson,
- ModuleTestCase,
- set_module_args,
-)
-import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
-
-RESOURCE_LIST = [
- {
- "uptime": 0,
- "diskwrite": 0,
- "name": "test01",
- "maxcpu": 0,
- "node": "NODE1",
- "mem": 0,
- "netout": 0,
- "netin": 0,
- "maxmem": 0,
- "diskread": 0,
- "disk": 0,
- "maxdisk": 0,
- "status": "running",
- "cpu": 0,
- "id": "qemu/100",
- "template": 0,
- "vmid": 100,
- "type": "qemu"
- },
- {
- "uptime": 0,
- "diskwrite": 0,
- "name": "test02",
- "maxcpu": 0,
- "node": "NODE1",
- "mem": 0,
- "netout": 0,
- "netin": 0,
- "maxmem": 0,
- "diskread": 0,
- "disk": 0,
- "maxdisk": 0,
- "status": "running",
- "cpu": 0,
- "id": "qemu/101",
- "template": 0,
- "vmid": 101,
- "type": "qemu"
- },
- {
- "uptime": 0,
- "diskwrite": 0,
- "name": "test03",
- "maxcpu": 0,
- "node": "NODE2",
- "mem": 0,
- "netout": 0,
- "netin": 0,
- "maxmem": 0,
- "diskread": 0,
- "disk": 0,
- "maxdisk": 0,
- "status": "running",
- "cpu": 0,
- "id": "qemu/102",
- "template": 0,
- "vmid": 102,
- "type": "qemu"
- }
-]
-BACKUP_JOBS = [
- {
- "type": "vzdump",
- "id": "backup-83831498-c631",
- "storage": "local",
- "vmid": "100",
- "enabled": 1,
- "next-run": 1735138800,
- "mailnotification": "always",
- "schedule": "06,18:30",
- "mode": "snapshot",
- "notes-template": "guestname"
- },
- {
- "schedule": "sat 15:00",
- "notes-template": "guestname",
- "mode": "snapshot",
- "mailnotification": "always",
- "next-run": 1735385400,
- "type": "vzdump",
- "enabled": 1,
- "vmid": "100,101,102",
- "storage": "local",
- "id": "backup-70025700-2302",
- }
-]
-
-EXPECTED_BACKUP_OUTPUT = [
- {
- "bktype": "vzdump",
- "enabled": 1,
- "id": "backup-83831498-c631",
- "mode": "snapshot",
- "next-run": "2024-12-25 15:00:00",
- "schedule": "06,18:30",
- "storage": "local",
- "vm_name": "test01",
- "vmid": "100"
- },
- {
- "bktype": "vzdump",
- "enabled": 1,
- "id": "backup-70025700-2302",
- "mode": "snapshot",
- "next-run": "2024-12-28 11:30:00",
- "schedule": "sat 15:00",
- "storage": "local",
- "vm_name": "test01",
- "vmid": "100"
- },
- {
- "bktype": "vzdump",
- "enabled": 1,
- "id": "backup-70025700-2302",
- "mode": "snapshot",
- "next-run": "2024-12-28 11:30:00",
- "schedule": "sat 15:00",
- "storage": "local",
- "vm_name": "test02",
- "vmid": "101"
- },
- {
- "bktype": "vzdump",
- "enabled": 1,
- "id": "backup-70025700-2302",
- "mode": "snapshot",
- "next-run": "2024-12-28 11:30:00",
- "schedule": "sat 15:00",
- "storage": "local",
- "vm_name": "test03",
- "vmid": "102"
- }
-]
-EXPECTED_BACKUP_JOBS_OUTPUT = [
- {
- "enabled": 1,
- "id": "backup-83831498-c631",
- "mailnotification": "always",
- "mode": "snapshot",
- "next-run": 1735138800,
- "notes-template": "guestname",
- "schedule": "06,18:30",
- "storage": "local",
- "type": "vzdump",
- "vmid": "100"
- },
- {
- "enabled": 1,
- "id": "backup-70025700-2302",
- "mailnotification": "always",
- "mode": "snapshot",
- "next-run": 1735385400,
- "notes-template": "guestname",
- "schedule": "sat 15:00",
- "storage": "local",
- "type": "vzdump",
- "vmid": "100,101,102"
- }
-]
-
-
-class TestProxmoxBackupInfoModule(ModuleTestCase):
- def setUp(self):
- super(TestProxmoxBackupInfoModule, self).setUp()
- proxmox_utils.HAS_PROXMOXER = True
- self.module = proxmox_backup_info
- self.connect_mock = patch(
- "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect",
- ).start()
- self.connect_mock.return_value.cluster.resources.get.return_value = (
- RESOURCE_LIST
- )
- self.connect_mock.return_value.cluster.backup.get.return_value = (
- BACKUP_JOBS
- )
-
- def tearDown(self):
- self.connect_mock.stop()
- super(TestProxmoxBackupInfoModule, self).tearDown()
-
- def test_module_fail_when_required_args_missing(self):
- with pytest.raises(AnsibleFailJson) as exc_info:
- with set_module_args({}):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["msg"] == "missing required arguments: api_host, api_user"
-
- def test_get_all_backups_information(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- with set_module_args({
- 'api_host': 'proxmoxhost',
- 'api_user': 'root@pam',
- 'api_password': 'supersecret'
- }):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["backup_info"] == EXPECTED_BACKUP_OUTPUT
-
- def test_get_specific_backup_information_by_vmname(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- vmname = 'test01'
- expected_output = [
- backup for backup in EXPECTED_BACKUP_OUTPUT if backup["vm_name"] == vmname
- ]
- with set_module_args({
- 'api_host': 'proxmoxhost',
- 'api_user': 'root@pam',
- 'api_password': 'supersecret',
- 'vm_name': vmname
- }):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["backup_info"] == expected_output
- assert len(result["backup_info"]) == 2
-
- def test_get_specific_backup_information_by_vmid(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- vmid = "101"
- expected_output = [
- backup for backup in EXPECTED_BACKUP_OUTPUT if backup["vmid"] == vmid
- ]
- with set_module_args({
- 'api_host': 'proxmoxhost',
- 'api_user': 'root@pam',
- 'api_password': 'supersecret',
- 'vm_id': vmid
- }):
- self.module.main()
- result = exc_info.value.args[0]
- assert result["backup_info"] == expected_output
- assert len(result["backup_info"]) == 1
-
- def test_get_specific_backup_information_by_backupjobs(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- backupjobs = True
- with set_module_args({
- 'api_host': 'proxmoxhost',
- 'api_user': 'root@pam',
- 'api_password': 'supersecret',
- 'backup_jobs': backupjobs
- }):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["backup_info"] == EXPECTED_BACKUP_JOBS_OUTPUT
diff --git a/tests/unit/plugins/modules/test_proxmox_kvm.py b/tests/unit/plugins/modules/test_proxmox_kvm.py
deleted file mode 100644
index ffc806bdc8..0000000000
--- a/tests/unit/plugins/modules/test_proxmox_kvm.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2021, Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-import sys
-
-import pytest
-
-proxmoxer = pytest.importorskip("proxmoxer")
-mandatory_py_version = pytest.mark.skipif(
- sys.version_info < (2, 7),
- reason="The proxmoxer dependency requires python2.7 or higher",
-)
-
-from ansible_collections.community.general.plugins.modules import proxmox_kvm
-from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import (
- patch,
- DEFAULT,
-)
-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
- AnsibleExitJson,
- AnsibleFailJson,
- ModuleTestCase,
- set_module_args,
-)
-import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
-
-
-class TestProxmoxKvmModule(ModuleTestCase):
- def setUp(self):
- super(TestProxmoxKvmModule, self).setUp()
- proxmox_utils.HAS_PROXMOXER = True
- self.module = proxmox_kvm
- self.connect_mock = patch(
- "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect"
- ).start()
- self.get_node_mock = patch.object(
- proxmox_utils.ProxmoxAnsible, "get_node"
- ).start()
- self.get_vm_mock = patch.object(proxmox_utils.ProxmoxAnsible, "get_vm").start()
- self.create_vm_mock = patch.object(
- proxmox_kvm.ProxmoxKvmAnsible, "create_vm"
- ).start()
-
- def tearDown(self):
- self.create_vm_mock.stop()
- self.get_vm_mock.stop()
- self.get_node_mock.stop()
- self.connect_mock.stop()
- super(TestProxmoxKvmModule, self).tearDown()
-
- def test_module_fail_when_required_args_missing(self):
- with self.assertRaises(AnsibleFailJson):
- with set_module_args({}):
- self.module.main()
-
- def test_module_exits_unchaged_when_provided_vmid_exists(self):
- with set_module_args(
- {
- "api_host": "host",
- "api_user": "user",
- "api_password": "password",
- "vmid": "100",
- "node": "pve",
- }
- ):
- self.get_vm_mock.return_value = [{"vmid": "100"}]
- with pytest.raises(AnsibleExitJson) as exc_info:
- self.module.main()
-
- assert self.get_vm_mock.call_count == 1
- result = exc_info.value.args[0]
- assert result["changed"] is False
- assert result["msg"] == "VM with vmid <100> already exists"
-
- def test_vm_created_when_vmid_not_exist_but_name_already_exist(self):
- with set_module_args(
- {
- "api_host": "host",
- "api_user": "user",
- "api_password": "password",
- "vmid": "100",
- "name": "existing.vm.local",
- "node": "pve",
- }
- ):
- self.get_vm_mock.return_value = None
- with pytest.raises(AnsibleExitJson) as exc_info:
- self.module.main()
-
- assert self.get_vm_mock.call_count == 1
- assert self.get_node_mock.call_count == 1
- result = exc_info.value.args[0]
- assert result["changed"] is True
- assert result["msg"] == "VM existing.vm.local with vmid 100 deployed"
-
- def test_vm_not_created_when_name_already_exist_and_vmid_not_set(self):
- with set_module_args(
- {
- "api_host": "host",
- "api_user": "user",
- "api_password": "password",
- "name": "existing.vm.local",
- "node": "pve",
- }
- ):
- with patch.object(proxmox_utils.ProxmoxAnsible, "get_vmid") as get_vmid_mock:
- get_vmid_mock.return_value = {
- "vmid": 100,
- "name": "existing.vm.local",
- }
- with pytest.raises(AnsibleExitJson) as exc_info:
- self.module.main()
-
- assert get_vmid_mock.call_count == 1
- result = exc_info.value.args[0]
- assert result["changed"] is False
-
- def test_vm_created_when_name_doesnt_exist_and_vmid_not_set(self):
- with set_module_args(
- {
- "api_host": "host",
- "api_user": "user",
- "api_password": "password",
- "name": "existing.vm.local",
- "node": "pve",
- }
- ):
- self.get_vm_mock.return_value = None
- with patch.multiple(
- proxmox_utils.ProxmoxAnsible, get_vmid=DEFAULT, get_nextvmid=DEFAULT
- ) as utils_mock:
- utils_mock["get_vmid"].return_value = None
- utils_mock["get_nextvmid"].return_value = 101
- with pytest.raises(AnsibleExitJson) as exc_info:
- self.module.main()
-
- assert utils_mock["get_vmid"].call_count == 1
- assert utils_mock["get_nextvmid"].call_count == 1
- result = exc_info.value.args[0]
- assert result["changed"] is True
- assert result["msg"] == "VM existing.vm.local with vmid 101 deployed"
-
- def test_parse_mac(self):
- assert (
- proxmox_kvm.parse_mac("virtio=00:11:22:AA:BB:CC,bridge=vmbr0,firewall=1")
- == "00:11:22:AA:BB:CC"
- )
-
- def test_parse_dev(self):
- assert (
- proxmox_kvm.parse_dev("local-lvm:vm-1000-disk-0,format=qcow2")
- == "local-lvm:vm-1000-disk-0"
- )
- assert (
- proxmox_kvm.parse_dev("local-lvm:vm-101-disk-1,size=8G")
- == "local-lvm:vm-101-disk-1"
- )
- assert (
- proxmox_kvm.parse_dev("local-zfs:vm-1001-disk-0")
- == "local-zfs:vm-1001-disk-0"
- )
diff --git a/tests/unit/plugins/modules/test_proxmox_snap.py b/tests/unit/plugins/modules/test_proxmox_snap.py
deleted file mode 100644
index 2aef4cb982..0000000000
--- a/tests/unit/plugins/modules/test_proxmox_snap.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2019, Ansible Project
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-import sys
-
-import pytest
-
-proxmoxer = pytest.importorskip('proxmoxer')
-mandatory_py_version = pytest.mark.skipif(
- sys.version_info < (2, 7),
- reason='The proxmoxer dependency requires python2.7 or higher'
-)
-
-from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import MagicMock, patch
-from ansible_collections.community.general.plugins.modules import proxmox_snap
-import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args
-
-
-def get_resources(type):
- return [{"diskwrite": 0,
- "vmid": 100,
- "node": "localhost",
- "id": "lxc/100",
- "maxdisk": 10000,
- "template": 0,
- "disk": 10000,
- "uptime": 10000,
- "maxmem": 10000,
- "maxcpu": 1,
- "netin": 10000,
- "type": "lxc",
- "netout": 10000,
- "mem": 10000,
- "diskread": 10000,
- "cpu": 0.01,
- "name": "test-lxc",
- "status": "running"}]
-
-
-def fake_api(mocker):
- r = mocker.MagicMock()
- r.cluster.resources.get = MagicMock(side_effect=get_resources)
- return r
-
-
-def test_proxmox_snap_without_argument(capfd):
- with set_module_args({}):
- with pytest.raises(SystemExit) as results:
- proxmox_snap.main()
-
- out, err = capfd.readouterr()
- assert not err
- assert json.loads(out)['failed']
-
-
-@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
-def test_create_snapshot_check_mode(connect_mock, capfd, mocker):
- with set_module_args({
- "hostname": "test-lxc",
- "api_user": "root@pam",
- "api_password": "secret",
- "api_host": "127.0.0.1",
- "state": "present",
- "snapname": "test",
- "timeout": "1",
- "force": True,
- "_ansible_check_mode": True
- }):
- proxmox_utils.HAS_PROXMOXER = True
- connect_mock.side_effect = lambda: fake_api(mocker)
- with pytest.raises(SystemExit) as results:
- proxmox_snap.main()
-
- out, err = capfd.readouterr()
- assert not err
- assert not json.loads(out)['changed']
-
-
-@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
-def test_remove_snapshot_check_mode(connect_mock, capfd, mocker):
- with set_module_args({
- "hostname": "test-lxc",
- "api_user": "root@pam",
- "api_password": "secret",
- "api_host": "127.0.0.1",
- "state": "absent",
- "snapname": "test",
- "timeout": "1",
- "force": True,
- "_ansible_check_mode": True
- }):
- proxmox_utils.HAS_PROXMOXER = True
- connect_mock.side_effect = lambda: fake_api(mocker)
- with pytest.raises(SystemExit) as results:
- proxmox_snap.main()
-
- out, err = capfd.readouterr()
- assert not err
- assert not json.loads(out)['changed']
-
-
-@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
-def test_rollback_snapshot_check_mode(connect_mock, capfd, mocker):
- with set_module_args({
- "hostname": "test-lxc",
- "api_user": "root@pam",
- "api_password": "secret",
- "api_host": "127.0.0.1",
- "state": "rollback",
- "snapname": "test",
- "timeout": "1",
- "force": True,
- "_ansible_check_mode": True
- }):
- proxmox_utils.HAS_PROXMOXER = True
- connect_mock.side_effect = lambda: fake_api(mocker)
- with pytest.raises(SystemExit) as results:
- proxmox_snap.main()
-
- out, err = capfd.readouterr()
- assert not err
- output = json.loads(out)
- assert not output['changed']
- assert output['msg'] == "Snapshot test does not exist"
diff --git a/tests/unit/plugins/modules/test_proxmox_storage_contents_info.py b/tests/unit/plugins/modules/test_proxmox_storage_contents_info.py
deleted file mode 100644
index 674dc45ac9..0000000000
--- a/tests/unit/plugins/modules/test_proxmox_storage_contents_info.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2023, Julian Vanden Broeck
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-import pytest
-
-proxmoxer = pytest.importorskip("proxmoxer")
-
-from ansible_collections.community.general.plugins.modules import proxmox_storage_contents_info
-from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
- AnsibleExitJson,
- AnsibleFailJson,
- ModuleTestCase,
- set_module_args,
-)
-import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
-
-NODE1 = "pve"
-RAW_LIST_OUTPUT = [
- {
- "content": "backup",
- "ctime": 1702528474,
- "format": "pbs-vm",
- "size": 273804166061,
- "subtype": "qemu",
- "vmid": 931,
- "volid": "datastore:backup/vm/931/2023-12-14T04:34:34Z",
- },
- {
- "content": "backup",
- "ctime": 1702582560,
- "format": "pbs-vm",
- "size": 273804166059,
- "subtype": "qemu",
- "vmid": 931,
- "volid": "datastore:backup/vm/931/2023-12-14T19:36:00Z",
- },
-]
-
-
-def get_module_args(node, storage, content="all", vmid=None):
- return {
- "api_host": "host",
- "api_user": "user",
- "api_password": "password",
- "node": node,
- "storage": storage,
- "content": content,
- "vmid": vmid,
- }
-
-
-class TestProxmoxStorageContentsInfo(ModuleTestCase):
- def setUp(self):
- super(TestProxmoxStorageContentsInfo, self).setUp()
- proxmox_utils.HAS_PROXMOXER = True
- self.module = proxmox_storage_contents_info
- self.connect_mock = patch(
- "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect",
- ).start()
- self.connect_mock.return_value.nodes.return_value.storage.return_value.content.return_value.get.return_value = (
- RAW_LIST_OUTPUT
- )
- self.connect_mock.return_value.nodes.get.return_value = [{"node": NODE1}]
-
- def tearDown(self):
- self.connect_mock.stop()
- super(TestProxmoxStorageContentsInfo, self).tearDown()
-
- def test_module_fail_when_required_args_missing(self):
- with pytest.raises(AnsibleFailJson) as exc_info:
- with set_module_args({}):
- self.module.main()
-
- def test_storage_contents_info(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- with set_module_args(get_module_args(node=NODE1, storage="datastore")):
- expected_output = {}
- self.module.main()
-
- result = exc_info.value.args[0]
- assert not result["changed"]
- assert result["proxmox_storage_content"] == RAW_LIST_OUTPUT
diff --git a/tests/unit/plugins/modules/test_proxmox_tasks_info.py b/tests/unit/plugins/modules/test_proxmox_tasks_info.py
deleted file mode 100644
index 08398d1504..0000000000
--- a/tests/unit/plugins/modules/test_proxmox_tasks_info.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2021, Andreas Botzner (@paginabianca)
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# Proxmox Tasks module unit tests.
-# The API responses used in these tests were recorded from PVE version 6.4-8
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-import sys
-
-import pytest
-
-proxmoxer = pytest.importorskip('proxmoxer')
-mandatory_py_version = pytest.mark.skipif(
- sys.version_info < (2, 7),
- reason='The proxmoxer dependency requires python2.7 or higher'
-)
-
-from ansible_collections.community.general.plugins.modules import proxmox_tasks_info
-import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
-from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import set_module_args
-
-NODE = 'node01'
-TASK_UPID = 'UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:'
-TASKS = [
- {
- "endtime": 1629092710,
- "id": "networking",
- "node": "iaclab-01-01",
- "pid": 3539,
- "pstart": 474062216,
- "starttime": 1629092709,
- "status": "OK",
- "type": "srvreload",
- "upid": "UPID:iaclab-01-01:00000DD3:1C419D88:6119FB65:srvreload:networking:root@pam:",
- "user": "root@pam"
- },
- {
- "endtime": 1627975785,
- "id": "networking",
- "node": "iaclab-01-01",
- "pid": 10717,
- "pstart": 362369675,
- "starttime": 1627975784,
- "status": "command 'ifreload -a' failed: exit code 1",
- "type": "srvreload",
- "upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:",
- "user": "root@pam"
- },
- {
- "endtime": 1627975503,
- "id": "networking",
- "node": "iaclab-01-01",
- "pid": 6778,
- "pstart": 362341540,
- "starttime": 1627975503,
- "status": "OK",
- "type": "srvreload",
- "upid": "UPID:iaclab-01-01:00001A7A:1598E4A4:6108EF4F:srvreload:networking:root@pam:",
- "user": "root@pam"
- }
-]
-EXPECTED_TASKS = [
- {
- "endtime": 1629092710,
- "id": "networking",
- "node": "iaclab-01-01",
- "pid": 3539,
- "pstart": 474062216,
- "starttime": 1629092709,
- "status": "OK",
- "type": "srvreload",
- "upid": "UPID:iaclab-01-01:00000DD3:1C419D88:6119FB65:srvreload:networking:root@pam:",
- "user": "root@pam",
- "failed": False
- },
- {
- "endtime": 1627975785,
- "id": "networking",
- "node": "iaclab-01-01",
- "pid": 10717,
- "pstart": 362369675,
- "starttime": 1627975784,
- "status": "command 'ifreload -a' failed: exit code 1",
- "type": "srvreload",
- "upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:",
- "user": "root@pam",
- "failed": True
- },
- {
- "endtime": 1627975503,
- "id": "networking",
- "node": "iaclab-01-01",
- "pid": 6778,
- "pstart": 362341540,
- "starttime": 1627975503,
- "status": "OK",
- "type": "srvreload",
- "upid": "UPID:iaclab-01-01:00001A7A:1598E4A4:6108EF4F:srvreload:networking:root@pam:",
- "user": "root@pam",
- "failed": False
- }
-]
-
-EXPECTED_SINGLE_TASK = [
- {
- "endtime": 1627975785,
- "id": "networking",
- "node": "iaclab-01-01",
- "pid": 10717,
- "pstart": 362369675,
- "starttime": 1627975784,
- "status": "command 'ifreload -a' failed: exit code 1",
- "type": "srvreload",
- "upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:",
- "user": "root@pam",
- "failed": True
- },
-]
-
-
-@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
-def test_without_required_parameters(connect_mock, capfd, mocker):
- with set_module_args({}):
- with pytest.raises(SystemExit):
- proxmox_tasks_info.main()
- out, err = capfd.readouterr()
- assert not err
- assert json.loads(out)['failed']
-
-
-def mock_api_tasks_response(mocker):
- m = mocker.MagicMock()
- g = mocker.MagicMock()
- m.nodes = mocker.MagicMock(return_value=g)
- g.tasks.get = mocker.MagicMock(return_value=TASKS)
- return m
-
-
-@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
-def test_get_tasks(connect_mock, capfd, mocker):
- with set_module_args({
- 'api_host': 'proxmoxhost',
- 'api_user': 'root@pam',
- 'api_password': 'supersecret',
- 'node': NODE
- }):
- connect_mock.side_effect = lambda: mock_api_tasks_response(mocker)
- proxmox_utils.HAS_PROXMOXER = True
-
- with pytest.raises(SystemExit):
- proxmox_tasks_info.main()
- out, err = capfd.readouterr()
- assert not err
- assert len(json.loads(out)['proxmox_tasks']) != 0
- assert not json.loads(out)['changed']
-
-
-@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
-def test_get_single_task(connect_mock, capfd, mocker):
- with set_module_args({
- 'api_host': 'proxmoxhost',
- 'api_user': 'root@pam',
- 'api_password': 'supersecret',
- 'node': NODE,
- 'task': TASK_UPID
- }):
- connect_mock.side_effect = lambda: mock_api_tasks_response(mocker)
- proxmox_utils.HAS_PROXMOXER = True
-
- with pytest.raises(SystemExit):
- proxmox_tasks_info.main()
- out, err = capfd.readouterr()
- assert not err
- assert len(json.loads(out)['proxmox_tasks']) == 1
- assert json.loads(out)
- assert not json.loads(out)['changed']
-
-
-@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
-def test_get_non_existent_task(connect_mock, capfd, mocker):
- with set_module_args({
- 'api_host': 'proxmoxhost',
- 'api_user': 'root@pam',
- 'api_password': 'supersecret',
- 'node': NODE,
- 'task': 'UPID:nonexistent'
- }):
- connect_mock.side_effect = lambda: mock_api_tasks_response(mocker)
- proxmox_utils.HAS_PROXMOXER = True
-
- with pytest.raises(SystemExit):
- proxmox_tasks_info.main()
- out, err = capfd.readouterr()
- assert not err
- assert json.loads(out)['failed']
- assert 'proxmox_tasks' not in json.loads(out)
- assert not json.loads(out)['changed']
- assert json.loads(
- out)['msg'] == 'Task: UPID:nonexistent does not exist on node: node01.'
diff --git a/tests/unit/plugins/modules/test_proxmox_template.py b/tests/unit/plugins/modules/test_proxmox_template.py
deleted file mode 100644
index 3e20213e8b..0000000000
--- a/tests/unit/plugins/modules/test_proxmox_template.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2023, Sergei Antipov
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-import os
-import sys
-
-import pytest
-
-proxmoxer = pytest.importorskip('proxmoxer')
-mandatory_py_version = pytest.mark.skipif(
- sys.version_info < (2, 7),
- reason='The proxmoxer dependency requires python2.7 or higher'
-)
-
-from ansible_collections.community.general.plugins.modules import proxmox_template
-from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch, Mock
-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
- AnsibleFailJson,
- ModuleTestCase,
- set_module_args,
-)
-import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
-
-
-class TestProxmoxTemplateModule(ModuleTestCase):
- def setUp(self):
- super(TestProxmoxTemplateModule, self).setUp()
- proxmox_utils.HAS_PROXMOXER = True
- self.module = proxmox_template
- self.connect_mock = patch(
- "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect"
- )
- self.connect_mock.start()
-
- def tearDown(self):
- self.connect_mock.stop()
- super(TestProxmoxTemplateModule, self).tearDown()
-
- @patch("os.stat")
- @patch.multiple(os.path, exists=Mock(return_value=True), isfile=Mock(return_value=True))
- def test_module_fail_when_toolbelt_not_installed_and_file_size_is_big(self, mock_stat):
- self.module.HAS_REQUESTS_TOOLBELT = False
- mock_stat.return_value.st_size = 268435460
- with set_module_args(
- {
- "api_host": "host",
- "api_user": "user",
- "api_password": "password",
- "node": "pve",
- "src": "/tmp/mock.iso",
- "content_type": "iso"
- }
- ):
- with pytest.raises(AnsibleFailJson) as exc_info:
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["failed"] is True
- assert result["msg"] == "'requests_toolbelt' module is required to upload files larger than 256MB"
diff --git a/tests/unit/plugins/modules/test_proxmox_vm_info.py b/tests/unit/plugins/modules/test_proxmox_vm_info.py
deleted file mode 100644
index 8d2d0ab2ad..0000000000
--- a/tests/unit/plugins/modules/test_proxmox_vm_info.py
+++ /dev/null
@@ -1,714 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2023, Sergei Antipov
-# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-import sys
-
-import pytest
-
-proxmoxer = pytest.importorskip("proxmoxer")
-mandatory_py_version = pytest.mark.skipif(
- sys.version_info < (2, 7),
- reason="The proxmoxer dependency requires python2.7 or higher",
-)
-
-from ansible_collections.community.general.plugins.modules import proxmox_vm_info
-from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
-from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
- AnsibleExitJson,
- AnsibleFailJson,
- ModuleTestCase,
- set_module_args,
-)
-import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
-
-NODE1 = "pve"
-NODE2 = "pve2"
-RAW_CLUSTER_OUTPUT = [
- {
- "cpu": 0.174069059487628,
- "disk": 0,
- "diskread": 6656,
- "diskwrite": 0,
- "id": "qemu/100",
- "maxcpu": 1,
- "maxdisk": 34359738368,
- "maxmem": 4294967296,
- "mem": 35304543,
- "name": "pxe.home.arpa",
- "netin": 416956,
- "netout": 17330,
- "node": NODE1,
- "status": "running",
- "template": 0,
- "type": "qemu",
- "uptime": 669,
- "vmid": 100,
- },
- {
- "cpu": 0,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "id": "qemu/101",
- "maxcpu": 1,
- "maxdisk": 0,
- "maxmem": 536870912,
- "mem": 0,
- "name": "test1",
- "netin": 0,
- "netout": 0,
- "node": NODE2,
- "pool": "pool1",
- "status": "stopped",
- "template": 0,
- "type": "qemu",
- "uptime": 0,
- "vmid": 101,
- },
- {
- "cpu": 0,
- "disk": 352190464,
- "diskread": 0,
- "diskwrite": 0,
- "id": "lxc/102",
- "maxcpu": 2,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "mem": 28192768,
- "name": "test-lxc.home.arpa",
- "netin": 102757,
- "netout": 446,
- "node": NODE1,
- "status": "running",
- "template": 0,
- "type": "lxc",
- "uptime": 161,
- "vmid": 102,
- },
- {
- "cpu": 0,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "id": "lxc/103",
- "maxcpu": 2,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "mem": 0,
- "name": "test1-lxc.home.arpa",
- "netin": 0,
- "netout": 0,
- "node": NODE2,
- "pool": "pool1",
- "status": "stopped",
- "template": 0,
- "type": "lxc",
- "uptime": 0,
- "vmid": 103,
- },
- {
- "cpu": 0,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "id": "lxc/104",
- "maxcpu": 2,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "mem": 0,
- "name": "test-lxc.home.arpa",
- "netin": 0,
- "netout": 0,
- "node": NODE2,
- "pool": "pool1",
- "status": "stopped",
- "template": 0,
- "type": "lxc",
- "uptime": 0,
- "vmid": 104,
- },
- {
- "cpu": 0,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "id": "lxc/105",
- "maxcpu": 2,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "mem": 0,
- "name": "",
- "netin": 0,
- "netout": 0,
- "node": NODE2,
- "pool": "pool1",
- "status": "stopped",
- "template": 0,
- "type": "lxc",
- "uptime": 0,
- "vmid": 105,
- },
-]
-RAW_LXC_OUTPUT = [
- {
- "cpu": 0,
- "cpus": 2,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "maxswap": 536870912,
- "mem": 0,
- "name": "test1-lxc.home.arpa",
- "netin": 0,
- "netout": 0,
- "status": "stopped",
- "swap": 0,
- "type": "lxc",
- "uptime": 0,
- "vmid": "103",
- },
- {
- "cpu": 0,
- "cpus": 2,
- "disk": 352190464,
- "diskread": 0,
- "diskwrite": 0,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "maxswap": 536870912,
- "mem": 28192768,
- "name": "test-lxc.home.arpa",
- "netin": 102757,
- "netout": 446,
- "pid": 4076752,
- "status": "running",
- "swap": 0,
- "type": "lxc",
- "uptime": 161,
- "vmid": "102",
- },
- {
- "cpu": 0,
- "cpus": 2,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "maxswap": 536870912,
- "mem": 0,
- "name": "test-lxc.home.arpa",
- "netin": 0,
- "netout": 0,
- "status": "stopped",
- "swap": 0,
- "type": "lxc",
- "uptime": 0,
- "vmid": "104",
- },
- {
- "cpu": 0,
- "cpus": 2,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "maxswap": 536870912,
- "mem": 0,
- "name": "",
- "netin": 0,
- "netout": 0,
- "status": "stopped",
- "swap": 0,
- "type": "lxc",
- "uptime": 0,
- "vmid": "105",
- },
-]
-RAW_QEMU_OUTPUT = [
- {
- "cpu": 0,
- "cpus": 1,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "maxdisk": 0,
- "maxmem": 536870912,
- "mem": 0,
- "name": "test1",
- "netin": 0,
- "netout": 0,
- "status": "stopped",
- "uptime": 0,
- "vmid": 101,
- },
- {
- "cpu": 0.174069059487628,
- "cpus": 1,
- "disk": 0,
- "diskread": 6656,
- "diskwrite": 0,
- "maxdisk": 34359738368,
- "maxmem": 4294967296,
- "mem": 35304543,
- "name": "pxe.home.arpa",
- "netin": 416956,
- "netout": 17330,
- "pid": 4076688,
- "status": "running",
- "uptime": 669,
- "vmid": 100,
- },
-]
-EXPECTED_VMS_OUTPUT = [
- {
- "cpu": 0.174069059487628,
- "cpus": 1,
- "disk": 0,
- "diskread": 6656,
- "diskwrite": 0,
- "id": "qemu/100",
- "maxcpu": 1,
- "maxdisk": 34359738368,
- "maxmem": 4294967296,
- "mem": 35304543,
- "name": "pxe.home.arpa",
- "netin": 416956,
- "netout": 17330,
- "node": NODE1,
- "pid": 4076688,
- "status": "running",
- "template": False,
- "type": "qemu",
- "uptime": 669,
- "vmid": 100,
- },
- {
- "cpu": 0,
- "cpus": 1,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "id": "qemu/101",
- "maxcpu": 1,
- "maxdisk": 0,
- "maxmem": 536870912,
- "mem": 0,
- "name": "test1",
- "netin": 0,
- "netout": 0,
- "node": NODE2,
- "pool": "pool1",
- "status": "stopped",
- "template": False,
- "type": "qemu",
- "uptime": 0,
- "vmid": 101,
- },
- {
- "cpu": 0,
- "cpus": 2,
- "disk": 352190464,
- "diskread": 0,
- "diskwrite": 0,
- "id": "lxc/102",
- "maxcpu": 2,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "maxswap": 536870912,
- "mem": 28192768,
- "name": "test-lxc.home.arpa",
- "netin": 102757,
- "netout": 446,
- "node": NODE1,
- "pid": 4076752,
- "status": "running",
- "swap": 0,
- "template": False,
- "type": "lxc",
- "uptime": 161,
- "vmid": 102,
- },
- {
- "cpu": 0,
- "cpus": 2,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "id": "lxc/103",
- "maxcpu": 2,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "maxswap": 536870912,
- "mem": 0,
- "name": "test1-lxc.home.arpa",
- "netin": 0,
- "netout": 0,
- "node": NODE2,
- "pool": "pool1",
- "status": "stopped",
- "swap": 0,
- "template": False,
- "type": "lxc",
- "uptime": 0,
- "vmid": 103,
- },
- {
- "cpu": 0,
- "cpus": 2,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "id": "lxc/104",
- "maxcpu": 2,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "maxswap": 536870912,
- "mem": 0,
- "name": "test-lxc.home.arpa",
- "netin": 0,
- "netout": 0,
- "node": NODE2,
- "pool": "pool1",
- "status": "stopped",
- "swap": 0,
- "template": False,
- "type": "lxc",
- "uptime": 0,
- "vmid": 104,
- },
- {
- "cpu": 0,
- "cpus": 2,
- "disk": 0,
- "diskread": 0,
- "diskwrite": 0,
- "id": "lxc/105",
- "maxcpu": 2,
- "maxdisk": 10737418240,
- "maxmem": 536870912,
- "maxswap": 536870912,
- "mem": 0,
- "name": "",
- "netin": 0,
- "netout": 0,
- "node": NODE2,
- "pool": "pool1",
- "status": "stopped",
- "swap": 0,
- "template": False,
- "type": "lxc",
- "uptime": 0,
- "vmid": 105,
- },
-]
-
-
-def get_module_args(type="all", node=None, vmid=None, name=None, config="none"):
- return {
- "api_host": "host",
- "api_user": "user",
- "api_password": "password",
- "node": node,
- "type": type,
- "vmid": vmid,
- "name": name,
- "config": config,
- }
-
-
-class TestProxmoxVmInfoModule(ModuleTestCase):
- def setUp(self):
- super(TestProxmoxVmInfoModule, self).setUp()
- proxmox_utils.HAS_PROXMOXER = True
- self.module = proxmox_vm_info
- self.connect_mock = patch(
- "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect",
- ).start()
- self.connect_mock.return_value.nodes.return_value.lxc.return_value.get.return_value = (
- RAW_LXC_OUTPUT
- )
- self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.return_value = (
- RAW_QEMU_OUTPUT
- )
- self.connect_mock.return_value.cluster.return_value.resources.return_value.get.return_value = (
- RAW_CLUSTER_OUTPUT
- )
- self.connect_mock.return_value.nodes.get.return_value = [{"node": NODE1}]
-
- def tearDown(self):
- self.connect_mock.stop()
- super(TestProxmoxVmInfoModule, self).tearDown()
-
- def test_module_fail_when_required_args_missing(self):
- with pytest.raises(AnsibleFailJson) as exc_info:
- with set_module_args({}):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["msg"] == "missing required arguments: api_host, api_user"
-
- def test_get_lxc_vms_information(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- with set_module_args(get_module_args(type="lxc")):
- expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "lxc"]
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["changed"] is False
- assert result["proxmox_vms"] == expected_output
-
- def test_get_qemu_vms_information(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- with set_module_args(get_module_args(type="qemu")):
- expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["type"] == "qemu"]
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == expected_output
-
- def test_get_all_vms_information(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- with set_module_args(get_module_args()):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == EXPECTED_VMS_OUTPUT
-
- def test_vmid_is_converted_to_int(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- with set_module_args(get_module_args(type="lxc")):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert isinstance(result["proxmox_vms"][0]["vmid"], int)
-
- def test_get_specific_lxc_vm_information(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- vmid = 102
- expected_output = [
- vm
- for vm in EXPECTED_VMS_OUTPUT
- if vm["vmid"] == vmid and vm["type"] == "lxc"
- ]
- with set_module_args(get_module_args(type="lxc", vmid=vmid)):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == expected_output
- assert len(result["proxmox_vms"]) == 1
-
- def test_get_specific_qemu_vm_information(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- vmid = 100
- expected_output = [
- vm
- for vm in EXPECTED_VMS_OUTPUT
- if vm["vmid"] == vmid and vm["type"] == "qemu"
- ]
- with set_module_args(get_module_args(type="qemu", vmid=vmid)):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == expected_output
- assert len(result["proxmox_vms"]) == 1
-
- def test_get_specific_vm_information(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- vmid = 100
- expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["vmid"] == vmid]
- with set_module_args(get_module_args(type="all", vmid=vmid)):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == expected_output
- assert len(result["proxmox_vms"]) == 1
-
- def test_get_specific_vm_information_by_using_name(self):
- name = "test1-lxc.home.arpa"
- self.connect_mock.return_value.cluster.resources.get.return_value = [
- {"name": name, "vmid": "103"}
- ]
-
- with pytest.raises(AnsibleExitJson) as exc_info:
- expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name]
- with set_module_args(get_module_args(type="all", name=name)):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == expected_output
- assert len(result["proxmox_vms"]) == 1
-
- def test_get_multiple_vms_with_the_same_name(self):
- name = "test-lxc.home.arpa"
- self.connect_mock.return_value.cluster.resources.get.return_value = [
- {"name": name, "vmid": "102"},
- {"name": name, "vmid": "104"},
- ]
-
- with pytest.raises(AnsibleExitJson) as exc_info:
- expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name]
- with set_module_args(get_module_args(type="all", name=name)):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == expected_output
- assert len(result["proxmox_vms"]) == 2
-
- def test_get_vm_with_an_empty_name(self):
- name = ""
- self.connect_mock.return_value.cluster.resources.get.return_value = [
- {"name": name, "vmid": "105"},
- ]
-
- with pytest.raises(AnsibleExitJson) as exc_info:
- expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name]
- with set_module_args(get_module_args(type="all", name=name)):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == expected_output
- assert len(result["proxmox_vms"]) == 1
-
- def test_get_all_lxc_vms_from_specific_node(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- expected_output = [
- vm
- for vm in EXPECTED_VMS_OUTPUT
- if vm["node"] == NODE1 and vm["type"] == "lxc"
- ]
- with set_module_args(get_module_args(type="lxc", node=NODE1)):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == expected_output
- assert len(result["proxmox_vms"]) == 1
-
- def test_get_all_qemu_vms_from_specific_node(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- expected_output = [
- vm
- for vm in EXPECTED_VMS_OUTPUT
- if vm["node"] == NODE1 and vm["type"] == "qemu"
- ]
- with set_module_args(get_module_args(type="qemu", node=NODE1)):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == expected_output
- assert len(result["proxmox_vms"]) == 1
-
- def test_get_all_vms_from_specific_node(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["node"] == NODE1]
- with set_module_args(get_module_args(node=NODE1)):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == expected_output
- assert len(result["proxmox_vms"]) == 2
-
- def test_module_returns_empty_list_when_vm_does_not_exist(self):
- with pytest.raises(AnsibleExitJson) as exc_info:
- vmid = 200
- with set_module_args(get_module_args(type="all", vmid=vmid)):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == []
-
- def test_module_fail_when_qemu_request_fails(self):
- self.connect_mock.return_value.nodes.return_value.qemu.return_value.get.side_effect = IOError(
- "Some mocked connection error."
- )
- with pytest.raises(AnsibleFailJson) as exc_info:
- with set_module_args(get_module_args(type="qemu")):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert "Failed to retrieve QEMU VMs information:" in result["msg"]
-
- def test_module_fail_when_lxc_request_fails(self):
- self.connect_mock.return_value.nodes.return_value.lxc.return_value.get.side_effect = IOError(
- "Some mocked connection error."
- )
- with pytest.raises(AnsibleFailJson) as exc_info:
- with set_module_args(get_module_args(type="lxc")):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert "Failed to retrieve LXC VMs information:" in result["msg"]
-
- def test_module_fail_when_cluster_resources_request_fails(self):
- self.connect_mock.return_value.cluster.return_value.resources.return_value.get.side_effect = IOError(
- "Some mocked connection error."
- )
- with pytest.raises(AnsibleFailJson) as exc_info:
- with set_module_args(get_module_args()):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert (
- "Failed to retrieve VMs information from cluster resources:"
- in result["msg"]
- )
-
- def test_module_fail_when_node_does_not_exist(self):
- with pytest.raises(AnsibleFailJson) as exc_info:
- with set_module_args(get_module_args(type="all", node="NODE3")):
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["msg"] == "Node NODE3 doesn't exist in PVE cluster"
-
- def test_call_to_get_vmid_is_not_used_when_vmid_provided(self):
- with patch(
- "ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible.get_vmid"
- ) as get_vmid_mock:
- with pytest.raises(AnsibleExitJson):
- vmid = 100
- with set_module_args(
- get_module_args(type="all", vmid=vmid, name="something")
- ):
- self.module.main()
-
- assert get_vmid_mock.call_count == 0
-
- def test_config_returned_when_specified_qemu_vm_with_config(self):
- config_vm_value = {
- 'scsi0': 'local-lvm:vm-101-disk-0,iothread=1,size=32G',
- 'net0': 'virtio=4E:79:9F:A8:EE:E4,bridge=vmbr0,firewall=1',
- 'scsihw': 'virtio-scsi-single',
- 'cores': 1,
- 'name': 'test1',
- 'ostype': 'l26',
- 'boot': 'order=scsi0;ide2;net0',
- 'memory': 2048,
- 'sockets': 1,
- }
- (self.connect_mock.return_value.nodes.return_value.qemu.return_value.
- config.return_value.get.return_value) = config_vm_value
-
- with pytest.raises(AnsibleExitJson) as exc_info:
- vmid = 101
- with set_module_args(get_module_args(
- type="qemu",
- vmid=vmid,
- config="current",
- )):
- expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["vmid"] == vmid]
- expected_output[0]["config"] = config_vm_value
- self.module.main()
-
- result = exc_info.value.args[0]
- assert result["proxmox_vms"] == expected_output
diff --git a/tests/unit/plugins/modules/test_rundeck_acl_policy.py b/tests/unit/plugins/modules/test_rundeck_acl_policy.py
new file mode 100644
index 0000000000..564446cf3e
--- /dev/null
+++ b/tests/unit/plugins/modules/test_rundeck_acl_policy.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+from ansible_collections.community.general.plugins.modules import rundeck_acl_policy
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import patch
+from ansible_collections.community.internal_test_tools.tests.unit.plugins.modules.utils import (
+ set_module_args,
+ AnsibleExitJson,
+ exit_json,
+ fail_json
+)
+
+
+@pytest.fixture(autouse=True)
+def module():
+ with patch.multiple(
+ "ansible.module_utils.basic.AnsibleModule",
+ exit_json=exit_json,
+ fail_json=fail_json,
+ ):
+ yield
+
+
+# define our two table entries: system ACL vs. project ACL
+PROJECT_TABLE = [
+ (None, "system/acl"),
+ ("test_project", "project/test_project/acl"),
+]
+
+
+@pytest.mark.parametrize("project, prefix", PROJECT_TABLE)
+@patch.object(rundeck_acl_policy, 'api_request')
+def test_acl_create(api_request_mock, project, prefix):
+ """Test creating a new ACL, both system-level and project-level."""
+ name = "my_policy"
+ policy = "test_policy_yaml"
+ # simulate: GET→404, POST→201, final GET→200
+ api_request_mock.side_effect = [
+ (None, {'status': 404}),
+ (None, {'status': 201}),
+ ({"contents": policy}, {'status': 200}),
+ ]
+ args = {
+ 'name': name,
+ 'url': "https://rundeck.example.org",
+ 'api_token': "mytoken",
+ 'policy': policy,
+ }
+ if project:
+ args['project'] = project
+
+ with pytest.raises(AnsibleExitJson):
+ with set_module_args(args):
+ rundeck_acl_policy.main()
+
+ # should have done GET → POST → GET
+ assert api_request_mock.call_count == 3
+ args, kwargs = api_request_mock.call_args_list[1]
+ assert kwargs['endpoint'] == "%s/%s.aclpolicy" % (prefix, name)
+ assert kwargs['method'] == 'POST'
+
+
+@pytest.mark.parametrize("project, prefix", PROJECT_TABLE)
+@patch.object(rundeck_acl_policy, 'api_request')
+def test_acl_unchanged(api_request_mock, project, prefix):
+ """Test no-op when existing ACL contents match the desired policy."""
+ name = "unchanged_policy"
+ policy = "same_policy_yaml"
+ # first GET returns matching contents
+ api_request_mock.return_value = ({"contents": policy}, {'status': 200})
+
+ args = {
+ 'name': name,
+ 'url': "https://rundeck.example.org",
+ 'api_token': "mytoken",
+ 'policy': policy,
+ }
+ if project:
+ args['project'] = project
+
+ with pytest.raises(AnsibleExitJson):
+ with set_module_args(args):
+ rundeck_acl_policy.main()
+
+ # only a single GET
+ assert api_request_mock.call_count == 1
+ args, kwargs = api_request_mock.call_args
+ assert kwargs['endpoint'] == "%s/%s.aclpolicy" % (prefix, name)
+ # default method is GET
+ assert kwargs.get('method', 'GET') == 'GET'
+
+
+@pytest.mark.parametrize("project, prefix", PROJECT_TABLE)
+@patch.object(rundeck_acl_policy, 'api_request')
+def test_acl_remove(api_request_mock, project, prefix):
+ """Test removing an existing ACL, both system- and project-level."""
+ name = "remove_me"
+ # GET finds it, DELETE removes it
+ api_request_mock.side_effect = [
+ ({"contents": "old_yaml"}, {'status': 200}),
+ (None, {'status': 204}),
+ ]
+
+ args = {
+ 'name': name,
+ 'url': "https://rundeck.example.org",
+ 'api_token': "mytoken",
+ 'state': 'absent',
+ }
+ if project:
+ args['project'] = project
+
+ with pytest.raises(AnsibleExitJson):
+ with set_module_args(args):
+ rundeck_acl_policy.main()
+
+ # GET → DELETE
+ assert api_request_mock.call_count == 2
+ args, kwargs = api_request_mock.call_args_list[1]
+ assert kwargs['endpoint'] == "%s/%s.aclpolicy" % (prefix, name)
+ assert kwargs['method'] == 'DELETE'
+
+
+@pytest.mark.parametrize("project, prefix", PROJECT_TABLE)
+@patch.object(rundeck_acl_policy, 'api_request')
+def test_acl_remove_nonexistent(api_request_mock, project, prefix):
+ """Test removing a non-existent ACL results in no change."""
+ name = "not_there"
+ # GET returns 404
+ api_request_mock.return_value = (None, {'status': 404})
+
+ args = {
+ 'name': name,
+ 'url': "https://rundeck.example.org",
+ 'api_token': "mytoken",
+ 'state': 'absent',
+ }
+ if project:
+ args['project'] = project
+
+ with pytest.raises(AnsibleExitJson):
+ with set_module_args(args):
+ rundeck_acl_policy.main()
+
+ # only the initial GET
+ assert api_request_mock.call_count == 1
+ args, kwargs = api_request_mock.call_args
+ assert kwargs['endpoint'] == "%s/%s.aclpolicy" % (prefix, name)
+ assert kwargs.get('method', 'GET') == 'GET'
diff --git a/tests/unit/plugins/modules/test_slack.py b/tests/unit/plugins/modules/test_slack.py
index e656ab902f..e0c87f907f 100644
--- a/tests/unit/plugins/modules/test_slack.py
+++ b/tests/unit/plugins/modules/test_slack.py
@@ -103,6 +103,22 @@ class TestSlackModule(ModuleTestCase):
self.assertTrue(fetch_url_mock.call_count, 1)
self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.postMessage")
+ def test_govslack_message(self):
+ with set_module_args({
+ 'token': 'xoxa-123456789abcdef',
+ 'domain': 'slack-gov.com',
+ 'msg': 'test with ts'
+ }):
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ mock_response = Mock()
+ mock_response.read.return_value = '{"fake":"data"}'
+ fetch_url_mock.return_value = (mock_response, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ self.assertTrue(fetch_url_mock.call_count, 1)
+ self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack-gov.com/api/chat.postMessage")
+
def test_edit_message(self):
with set_module_args({
'token': 'xoxa-123456789abcdef',
diff --git a/tests/unit/plugins/modules/test_xdg_mime.py b/tests/unit/plugins/modules/test_xdg_mime.py
new file mode 100644
index 0000000000..b897777632
--- /dev/null
+++ b/tests/unit/plugins/modules/test_xdg_mime.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Marcos Alano
+# Based on gio_mime module. Copyright (c) 2022, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.modules import xdg_mime
+from .uthelper import UTHelper, RunCommandMock
+
+
+UTHelper.from_module(xdg_mime, __name__, mocks=[RunCommandMock])
diff --git a/tests/unit/plugins/modules/test_xdg_mime.yaml b/tests/unit/plugins/modules/test_xdg_mime.yaml
new file mode 100644
index 0000000000..83bc15f901
--- /dev/null
+++ b/tests/unit/plugins/modules/test_xdg_mime.yaml
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2025, Marcos Alano
+# Based on gio_mime module. Copyright (c) 2022, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# TODO: add tests for setting multiple mime types at once
+---
+anchors:
+ environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: true}
+ input: &input
+ mime_types: x-scheme-handler/http
+ handler: google-chrome.desktop
+ get_version: &get_version
+ command: [/testbin/xdg-mime, --version]
+ environ: *env-def
+ rc: 0
+ out: "xdg-mime 1.2.1\n"
+ err: ''
+ query_mime_type: &query_mime_type
+ command: [/testbin/xdg-mime, query, default, x-scheme-handler/http]
+ environ: *env-def
+ rc: 0
+ out: ''
+ err: ''
+ set_handler: &set_handler
+ command: [/testbin/xdg-mime, default, google-chrome.desktop, x-scheme-handler/http]
+ environ: *env-def
+ rc: 0
+ out: ''
+ err: ''
+test_cases:
+ - id: test_set_handler
+ input: *input
+ output:
+ current_handlers: ['']
+ changed: true
+ mocks:
+ run_command:
+ - *get_version
+ - *query_mime_type
+ - *set_handler
+ - id: test_set_handler_check
+ input: *input
+ output:
+ current_handlers: ['google-chrome.desktop']
+ changed: false
+ flags:
+ check: true
+ mocks:
+ run_command:
+ - *get_version
+ - <<: *query_mime_type
+ out: |
+ google-chrome.desktop
+ - id: test_set_handler_idempot
+ input: *input
+ output:
+ current_handlers: ['google-chrome.desktop']
+ changed: false
+ mocks:
+ run_command:
+ - *get_version
+ - <<: *query_mime_type
+ out: |
+ google-chrome.desktop
+ - id: test_set_handler_idempot_check
+ input: *input
+ output:
+ current_handlers: ['google-chrome.desktop']
+ changed: false
+ flags:
+ check: true
+ mocks:
+ run_command:
+ - *get_version
+ - <<: *query_mime_type
+ out: |
+ google-chrome.desktop
+ - id: test_set_invalid_handler
+ input:
+ <<: *input
+ handler: google-chrome.desktopX
+ output:
+ failed: true
+ msg: Handler must be a .desktop file
+ mocks:
+ run_command:
+ - *get_version
diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt
index fb24975d7b..765c70af0a 100644
--- a/tests/unit/requirements.txt
+++ b/tests/unit/requirements.txt
@@ -47,19 +47,16 @@ elastic-apm ; python_version >= '3.6'
# requirements for scaleway modules
passlib[argon2]
-# requirements for the proxmox modules
-proxmoxer < 2.0.0 ; python_version >= '2.7' and python_version <= '3.6'
-proxmoxer ; python_version > '3.6'
-
-# requirements for the proxmox_pct_remote connection plugin
-paramiko >= 3.0.0 ; python_version >= '3.6'
-
#requirements for nomad_token modules
python-nomad < 2.0.0 ; python_version <= '3.6'
python-nomad >= 2.0.0 ; python_version >= '3.7'
# requirement for jenkins_build, jenkins_node, jenkins_plugin modules
-python-jenkins >= 0.4.12
+python-jenkins < 1.8.0 ; python_version < '3.8'
+python-jenkins >= 0.4.12 ; python_version >= '3.8'
# requirement for json_patch, json_patch_recipe and json_patch plugins
-jsonpatch
\ No newline at end of file
+jsonpatch
+
+# requirements for the wsl connection plugin
+paramiko >= 3.0.0 ; python_version >= '3.6'
diff --git a/tests/unit/requirements.yml b/tests/unit/requirements.yml
index 586a6a1b37..107fe12569 100644
--- a/tests/unit/requirements.yml
+++ b/tests/unit/requirements.yml
@@ -4,4 +4,4 @@
# SPDX-License-Identifier: GPL-3.0-or-later
collections:
-- community.internal_test_tools
+ - community.internal_test_tools
diff --git a/tests/utils/shippable/shippable.sh b/tests/utils/shippable/shippable.sh
index 4167d7ffc5..134ff6de4b 100755
--- a/tests/utils/shippable/shippable.sh
+++ b/tests/utils/shippable/shippable.sh
@@ -10,6 +10,7 @@ IFS='/:' read -ra args <<< "$1"
ansible_version="${args[0]}"
script="${args[1]}"
+after_script="${args[2]}"
function join {
local IFS="$1";
@@ -69,6 +70,14 @@ export ANSIBLE_COLLECTIONS_PATHS="${PWD}/../../../"
# START: HACK install dependencies
+COMMUNITY_CRYPTO_BRANCH=main
+if [ "${ansible_version}" == "2.16" ]; then
+ COMMUNITY_CRYPTO_BRANCH=stable-2
+fi
+if [ "${script}" == "linux" ] && [ "$after_script" == "ubuntu2004" ]; then
+ COMMUNITY_CRYPTO_BRANCH=stable-2
+fi
+
# Nothing further should be added to this list.
# This is to prevent modules or plugins in this collection having a runtime dependency on other collections.
retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/internal_test_tools"
@@ -78,7 +87,7 @@ retry git clone --depth=1 --single-branch https://github.com/ansible-collections
if [ "${script}" != "sanity" ] && [ "${script}" != "units" ]; then
# To prevent Python dependencies on other collections only install other collections for integration tests
retry git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/ansible/posix"
- retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/crypto"
+ retry git clone --depth=1 --single-branch --branch "${COMMUNITY_CRYPTO_BRANCH}" https://github.com/ansible-collections/community.crypto.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/crypto"
retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.docker.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/docker"
# NOTE: we're installing with git to work around Galaxy being a huge PITA (https://github.com/ansible/galaxy/issues/2429)
# retry ansible-galaxy -vvv collection install ansible.posix
@@ -159,10 +168,8 @@ function cleanup
ansible-test coverage xml --color -v --requirements --group-by command --group-by version ${stub:+"$stub"}
cp -a tests/output/reports/coverage=*.xml "$SHIPPABLE_RESULT_DIR/codecoverage/"
- if [ "${ansible_version}" != "2.9" ]; then
- # analyze and capture code coverage aggregated by integration test target
- ansible-test coverage analyze targets generate -v "$SHIPPABLE_RESULT_DIR/testresults/coverage-analyze-targets.json"
- fi
+ # analyze and capture code coverage aggregated by integration test target
+ ansible-test coverage analyze targets generate -v "$SHIPPABLE_RESULT_DIR/testresults/coverage-analyze-targets.json"
# upload coverage report to codecov.io only when using complete on-demand coverage
if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ]; then